int main(int argc, char *argv[])
{
    DSP_cvStartDSP();
    CvCapture *capture;
    IplImage *videoFrame, *convFrame, *convOpencvFrame, *unsignedFrame;
    IplImage *dataImage, *integralImage;
    int key = 0; /* initialized so the first `while (key != 'q')` test is well defined */
    int *ptr;
    float *flPtr;
    int i, j;
    float tempFloat = 0.0;
    float *floatDataPtr;
    float *floatOutPtr;

    /* Data to test cvIntegral() */
    unsigned char intdata[] = {
        151,  57, 116, 170,   9, 247, 208, 140, 150,  60,  88,  77,   4,   6, 162,   6,
         31, 143, 178,   3, 135,  91,  54, 154, 193, 161,  20, 162, 137, 150, 128, 224,
        214, 113,   9,  28,  53, 211,  98, 217, 149, 233, 231, 127, 115, 203, 177,  42,
         62, 155,   3, 103, 127,  16, 135, 131, 211, 158,   9,   2, 106, 227, 249, 255
    }; /* 16 x 4 */

    if (argc < 2) {
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T [option] \n");
        printf("option:\ni. integral\ns. sobel\nd. dft\n");
        printf("Following are all the usage examples:\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i dsp (To test integral-image algorithm using DSP. Input is from webcam). Note: You need to install VLIB to test this.\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i arm (To test integral-image algorithm using ARM. Input is from webcam). \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i test (To test integral-image algorithm using the test data given in the app). \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T d dsp (To test DFT algorithm using DSP) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T d arm (To test DFT algorithm using ARM) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi dsp (To test sobel algorithm for movie clip tree.avi using DSP) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi arm (To test sobel algorithm for movie clip tree.avi using ARM) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam dsp (To test sobel algorithm for image from webcam using DSP) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam arm (To test sobel algorithm for image from webcam using ARM) \n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T rgb2gray (To test RGB to Gray for image from webcam.) \n");
        return (-1);
    }
    if (*argv[1] == 's' && argc < 3) {
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi \n");
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s webcam \n");
        return (-1);
    }

    switch (*argv[1]) {
    case 'i':
        switch (*argv[2]) {
        case 'd': { /* 'd' for DSP accelerated */
            cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
            capture = cvCaptureFromCAM(-1);
            if (!capture) {
                printf("Error: Video capture initialization failed.\n");
                break;
            }
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("**Error reading from webcam\n");
                break;
            }
            /* create new image for the grayscale version */
            convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
            /* create the integral image */
            convOpencvFrame = cvCreateImage(cvSize(convFrame->width + 1, convFrame->height + 1), IPL_DEPTH_32S, 1);
            /* Process the first frame outside the loop */
            DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
            DSP_cvSyncDSP();
            while (key != 'q') {
                /* Time to test and benchmark the DSP based integral image */
                Time_reset(&sTime);
                Time_delta(&sTime, &time);
                /* Find integral image */
                DSP_cvIntegral(convFrame, convOpencvFrame, NULL, NULL);
                /* get next frame */
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("***The End***\n");
                    break;
                }
                DSP_cvSyncDSP();
                /* Do color conversion */
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                /* Show image. Since VLIB computes the integral image, its output
                   width and height are the same as the source image, so temporarily
                   shrink the header for display. */
                convOpencvFrame->width -= 1;
                convOpencvFrame->height -= 1;
                convOpencvFrame->widthStep -= sizeof(int) * convOpencvFrame->nChannels;
                cvShowImage("video", convOpencvFrame);
                convOpencvFrame->width += 1;
                convOpencvFrame->height += 1;
                convOpencvFrame->widthStep += sizeof(int) * convOpencvFrame->nChannels;
                /* Sync with DSP */
                DSP_cvSyncDSP();
                Time_delta(&sTime, &time);
                printf("Total execution time = %dus\n", (unsigned int)time);
                key = cvWaitKey(15);
            }
            cvDestroyWindow("video");
            cvReleaseImage(&videoFrame);
            cvReleaseImage(&convFrame);
            cvReleaseImage(&convOpencvFrame);
            cvReleaseCapture(&capture);
        }
        break; /* End of DSP integral-image test */
        case 'a': { /* 'a' for ARM side */
            cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
            capture = cvCaptureFromCAM(-1);
            if (!capture) {
                printf("Error: Video capture initialization failed.\n");
                break;
            }
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("**Error reading from webcam\n");
                break;
            }
            /* create new image for the grayscale version */
            convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
            /* create the integral image */
            convOpencvFrame = cvCreateImage(cvSize(convFrame->width + 1, convFrame->height + 1), IPL_DEPTH_32S, 1);
            /* Process the first frame outside the loop */
            cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
            while (key != 'q') {
                /* Time to test and benchmark the ARM based integral image */
                Time_reset(&sTime);
                Time_delta(&sTime, &time);
                /* Find integral image */
                cvIntegral(convFrame, convOpencvFrame, NULL, NULL);
                /* get next frame */
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("***The End***\n");
                    break;
                }
                /* Do color conversion */
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                /* show image */
                cvShowImage("video", convOpencvFrame);
                Time_delta(&sTime, &time);
                printf("Total execution time = %dus\n", (unsigned int)time);
                key = cvWaitKey(15);
            }
            cvDestroyWindow("video");
            cvReleaseImage(&videoFrame);
            cvReleaseImage(&convFrame);
            cvReleaseImage(&convOpencvFrame);
            cvReleaseCapture(&capture);
        }
        break; /* End of ARM integral-image test */
        case 't': { /* This is to test the consistency of the algorithm */
            /* Start of integral image test */
            dataImage = cvCreateImageHeader(cvSize(16, 4), IPL_DEPTH_8U, 1);
            integralImage = cvCreateImageHeader(cvSize(17, 5), IPL_DEPTH_32S, 1);
            unsigned char *data = (unsigned char *)cvAlloc(16*4*sizeof(char));
            unsigned int *sum = (unsigned int *)cvAlloc(17*5*sizeof(int));
            memcpy(data, &intdata[0], 16*4*sizeof(char));
            memset(sum, 0, 17*5*sizeof(int));
            dataImage->imageData = (char *)data;
            integralImage->imageData = (char *)sum;
            /* DSP based integral */
            DSP_cvIntegral(dataImage, integralImage, NULL, NULL);
            ptr = (int *)integralImage->imageData;
            printf(" The integral image is:\n");
            for (i = 0; i < 4; i++) {
                for (j = 0; j < 16; j++) {
                    printf("%d \t ", ptr[i*16 + j]);
                }
                printf("\n");
            }
            /* ARM based cvIntegral() */
            cvIntegral(dataImage, integralImage, NULL, NULL);
            ptr = (int *)integralImage->imageData;
            printf(" The integral image is:\n");
            for (i = 0; i < 5; i++) {
                for (j = 0; j < 17; j++) {
                    printf("%d \t ", ptr[i*17 + j]);
                }
                printf("\n");
            }
            /* the headers came from cvCreateImageHeader(), so release the headers
               and the cvAlloc()ed buffers separately */
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
            cvFree(&data);
            cvFree(&sum);
        }
        break; /* End of integral image test */
        default:
            printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T i d \n./remote_ti_platforms_evm3530_opencv.xv5T i a \n./remote_ti_platforms_evm3530_opencv.xv5T i t \n");
            break;
        }
        break;
    /* Start RGB to Y test */
    case 'r': {
        cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
        capture = cvCaptureFromCAM(-1);
        if (!capture) {
            printf("Error: Video capture initialization failed.\n");
            break;
        }
        videoFrame = cvQueryFrame(capture);
        if (!videoFrame) {
            printf("**Error reading from webcam\n");
            break;
        }
        /* create new image for the grayscale version */
        convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
        unsignedFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
        while (key != 'q') {
            /* Time to test and benchmark the DSP based colour conversion */
            Time_reset(&sTime);
            Time_delta(&sTime, &time);
            DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("***The End***\n");
                break;
            }
            DSP_cvSyncDSP();
            cvShowImage("video", convFrame);
            Time_delta(&sTime, &time);
            printf("Total execution time1 = %dus\n", (unsigned int)time);
            key = cvWaitKey(15);
        }
        cvDestroyWindow("video");
        cvReleaseImage(&videoFrame);
        cvReleaseImage(&convFrame);
    }
    break;
    case 's': /* Start of sobel test */
        switch (*argv[2]) {
        case 't':
            switch (*argv[3]) {
            case 'd': { /* 'd' for DSP accelerated */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCreateFileCapture(argv[2]);
                if (!capture) {
                    printf("Error: Video not found\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading video\n");
                    break;
                }
                /* create new image for the grayscale version */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* create sobel filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                DSP_cvSyncDSP();
                while (key != 'q') {
                    /* Time to test and benchmark the DSP based sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);
                    DSP_cvSobel(convFrame, convOpencvFrame, 1, 1, 3);
                    /* get next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }
                    /* Sync with DSP */
                    DSP_cvSyncDSP();
                    /* Start RGB To Y conversion of next frame */
                    DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                    /* Display Filtered Image */
                    cvShowImage("video", convOpencvFrame);
                    /* Sync DSP */
                    DSP_cvSyncDSP();
                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&videoFrame);
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
            }
            break; /* End of DSP sobel test */
            case 'a': { /* 'a' for ARM side */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCreateFileCapture(argv[2]);
                if (!capture) {
                    printf("Error: Video not found\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading video\n");
                    break;
                }
                /* create new image for the grayscale version */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* create sobel filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                while (key != 'q') {
                    /* Time to test and benchmark the ARM based sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);
                    cvSobel(convFrame, convOpencvFrame, 1, 1, 3);
                    /* get next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }
                    /* Start RGB To Y conversion of next frame */
                    cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                    /* Display Filtered Image */
                    cvShowImage("video", convOpencvFrame);
                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&videoFrame);
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
            }
            break; /* End of ARM sobel test */
            default:
                printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi d \n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi a\n");
            }
            break;
        case 'w':
            switch (*argv[3]) {
            case 'd': { /* 'd' for DSP accelerated */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCaptureFromCAM(-1);
                if (!capture) {
                    printf("Error: Video capture initialization failed.\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading from webcam\n");
                    break;
                }
                /* create new image for the grayscale version */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* create sobel filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                /* Create unsigned image */
                unsignedFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Process the first frame outside the loop */
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                DSP_cvSyncDSP();
                while (key != 'q') {
                    /* Time to test and benchmark the DSP based sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);
                    DSP_cvSobel(convFrame, convOpencvFrame, 1, 1, 3);
                    /* get next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }
                    /* Sync with DSP */
                    DSP_cvSyncDSP();
                    /* Start RGB To Y conversion of next frame */
                    DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                    /* Convert the signed image to an unsigned image for better clarity */
                    cvConvert(convOpencvFrame, unsignedFrame);
                    /* Display Filtered Image */
                    cvShowImage("video", unsignedFrame);
                    /* Sync DSP */
                    DSP_cvSyncDSP();
                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&videoFrame);
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
            }
            break; /* End of DSP sobel test */
            case 'a': { /* 'a' for ARM side */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCaptureFromCAM(-1);
                if (!capture) {
                    printf("Error: Video capture initialization failed.\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading from webcam\n");
                    break;
                }
                /* create new image for the grayscale version */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* create sobel filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                /* Create unsigned image */
                unsignedFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Process the first frame outside the loop */
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                while (key != 'q') {
                    /* Time to test and benchmark the ARM based sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);
                    cvSobel(convFrame, convOpencvFrame, 1, 1, 3);
                    /* get next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }
                    /* Start RGB To Y conversion of next frame */
                    cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                    /* Convert the signed image to an unsigned image for better clarity */
                    cvConvert(convOpencvFrame, unsignedFrame);
                    /* Display Filtered Image */
                    cvShowImage("video", unsignedFrame);
                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&videoFrame);
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
            }
            break; /* End of ARM sobel test */
            default:
                printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s webcam d \n./remote_ti_platforms_evm3530_opencv.xv5T s webcam a\n");
            }
            break;
        default:
            printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi \n./remote_ti_platforms_evm3530_opencv.xv5T s webcam\n");
            break;
        }
        break;
    case 'd':
        switch (*argv[2]) {
        case 'd': { /* 'd' for DSP accelerated */
            int row = 63;
            int col = 63;
            floatDataPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
            floatOutPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
            dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            dataImage->imageData = (char *)floatDataPtr;
            integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            integralImage->imageData = (char *)floatOutPtr;
            for (i = 0; i < row * col * 2; i += 2) {
                tempFloat += 1.0;
                floatDataPtr[i] = tempFloat;
                floatDataPtr[i+1] = 0.0;
            }
            Time_reset(&sTime);
            Time_delta(&sTime, &time);
            DSP_cvDFT(dataImage, integralImage, CV_DXT_FORWARD, 0);
            Time_delta(&sTime, &time);
            DSP_cvSyncDSP();
            /* Print the output data for DFT */
            flPtr = (float *)integralImage->imageData;
            printf(" The DFT output is:\n");
            for (i = 0; i < integralImage->height; i++) {
                key = 0;
                for (j = 0; j < integralImage->width * 2; j += 2) {
                    key++;
                    printf("%f + i%f\t", flPtr[(i*integralImage->width*2) + j],
                           flPtr[(i*integralImage->width*2) + j + 1]);
                    if ((key % 5) == 0) printf("\n");
                }
                printf("\n");
            }
            printf("DSP_cvDFT Total execution time = %dus\n", (unsigned int)time);
            /* release the image headers created above, then the data buffers */
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
            cvFree(&floatDataPtr);
            cvFree(&floatOutPtr);
        }
        break;
        case 'a': {
            int row = 63;
            int col = 63;
            floatDataPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
            floatOutPtr = (float *)cvAlloc(sizeof(float)*row*col*2);
            dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            dataImage->imageData = (char *)floatDataPtr;
            integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            integralImage->imageData = (char *)floatOutPtr;
            for (i = 0; i < row * col * 2; i += 2) {
                tempFloat += 1.0;
                floatDataPtr[i] = tempFloat;
                floatDataPtr[i+1] = 0.0;
            }
            Time_reset(&sTime);
            Time_delta(&sTime, &time);
            cvDFT(dataImage, integralImage, CV_DXT_FORWARD, 0);
            Time_delta(&sTime, &time);
            /* Print the output data for DFT */
            flPtr = (float *)integralImage->imageData;
            printf(" The DFT output is:\n");
            for (i = 0; i < integralImage->height; i++) {
                key = 0;
                for (j = 0; j < integralImage->width * 2; j += 2) {
                    key++;
                    printf("%f + i%f\t", flPtr[(i*integralImage->width*2) + j],
                           flPtr[(i*integralImage->width*2) + j + 1]);
                    if ((key % 5) == 0) printf("\n");
                }
                printf("\n");
            }
            printf("cvDFT Total execution time = %dus\n", (unsigned int)time);
            /* release the image headers created above, then the data buffers */
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
            cvFree(&floatDataPtr);
            cvFree(&floatOutPtr);
        }
        break;
        default:
            printf("Input argument Error:\n Usage :\n./remote_ti_platforms_evm3530_opencv.xv5T d d \n./remote_ti_platforms_evm3530_opencv.xv5T d a\n");
        } /* end of switch (*argv[2]) */
        break;
    default:
        return -1;
    }
    DSP_cvEndDSP();
    return 0;
}
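/* Time_reset()/Time_delta() and the sTime/time globals used for benchmarking
   above come from TI-side utility code that is not shown here. A minimal
   stand-in with the same call shape -- a sketch, assuming microsecond
   resolution is enough and that `time` is an unsigned long -- could be: */
#include <sys/time.h>

typedef struct { struct timeval last; } Time_Object;

static void Time_reset(Time_Object *obj)
{
    gettimeofday(&obj->last, NULL);   /* remember the current instant */
}

static void Time_delta(Time_Object *obj, unsigned long *deltaUs)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    /* microseconds elapsed since the previous Time_reset()/Time_delta() call */
    long d = (now.tv_sec - obj->last.tv_sec) * 1000000L
           + (now.tv_usec - obj->last.tv_usec);
    *deltaUs = (unsigned long)d;
    obj->last = now;
}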
int main(int argc, char** argv)
{
    profile_name = (argc > 1 ? argv[1] : (char*)"blue_goal.yml");
    int cam = (argc > 2 ? atoi(argv[2]) : 0);
    // value loading
    fs = cvOpenFileStorage(profile_name, 0, CV_STORAGE_READ, NULL);
    Hmax = cvReadIntByName(fs, NULL, "Hmax", Hmax);
    Smax = cvReadIntByName(fs, NULL, "Smax", Smax);
    Vmax = cvReadIntByName(fs, NULL, "Vmax", Vmax);
    Hmin = cvReadIntByName(fs, NULL, "Hmin", Hmin);
    Smin = cvReadIntByName(fs, NULL, "Smin", Smin);
    Vmin = cvReadIntByName(fs, NULL, "Vmin", Vmin);
    minH = cvReadIntByName(fs, NULL, "minH", minH);
    cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("treshed", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("graph", CV_WINDOW_AUTOSIZE);
    cvCreateTrackbar("Hmin", "treshed", &Hmin, 360, onTrack);
    cvCreateTrackbar("Smin", "treshed", &Smin, 255, onTrack);
    cvCreateTrackbar("Vmin", "treshed", &Vmin, 255, onTrack);
    cvCreateTrackbar("Hmax", "treshed", &Hmax, 360, onTrack);
    cvCreateTrackbar("Smax", "treshed", &Smax, 255, onTrack);
    cvCreateTrackbar("Vmax", "treshed", &Vmax, 255, onTrack);
    cvCreateTrackbar("minH", "treshed", &minH, 255, onTrack);
    onTrack(0);
    CvCapture* camera = cvCaptureFromCAM(cam);
    while(1) {
        img = cvQueryFrame(camera);
        if(!img) break;
        allocateCvImage(&imgHSV, cvGetSize(img), 8, 3);
        cvCvtColor(img, imgHSV, CV_BGR2HSV);
        allocateCvImage(&imgThreshed, cvGetSize(img), 8, 1);
        cvInRangeS(imgHSV, cvScalar(Hmin, Smin, Vmin, 0), cvScalar(Hmax, Smax, Vmax, 0), imgThreshed);
        cvErode(imgThreshed, imgThreshed, 0, 2);
        int width = imgThreshed->width;
        int height = imgThreshed->height;
        int nchannels = imgThreshed->nChannels;
        int step = imgThreshed->widthStep;
        uchar* data = (uchar *)imgThreshed->imageData;
        unsigned int graph[width];
        int x, y;
        for(x = 0; x < width; x++) graph[x] = 0;
        int sum = 0, notnull = 0;
        for(x = 0; x < width; x++) {
            for(y = 0; y < height; y++) {
                if(data[y*step + x*nchannels] == 255) {
                    graph[x]++;
                }
            }
            sum += graph[x];
            if(graph[x] != 0) notnull += 1;
            // printf("%d\t%d\n", x, graph[x]);
        }
        if(notnull == 0) notnull = 1;
        int average = sum/notnull;
        if(average == 0) average = 1;
        float pix = 12.0/average;
        printf("\n sum: %d average: %d\n", sum, average);
        int first = 0, last = 0;
        // looking for goal; guard the x-1/x+1 neighbours at the image borders
        for(x = 0; x < width; x++) {
            if(graph[x] >= average && (x == 0 || graph[x-1] < average)) {
                cvLine(img, cvPoint(x, 0), cvPoint(x, height), cvScalar(255, 255, 0, 0), 1, 0, 0);
                if(first == 0) first = x;
            }
            if(graph[x] >= average && (x == width-1 || graph[x+1] < average)) {
                cvLine(img, cvPoint(x, 0), cvPoint(x, height), cvScalar(255, 255, 0, 0), 1, 0, 0);
                last = x;
            }
        }
        float goal = pix*(last-first);
        float error = (goal-60.0)/60.0*100.0;
        printf("Pix: %f; Goal in cm: %f; Error: %f%%\n", pix, goal, error);
        // image center
        cvLine(img, cvPoint(width/2, 0), cvPoint(width/2, height), cvScalar(0, 255, 255, 0), 1, 0, 0);
        int gCenter = (last+first) / 2;
        // X
        float X = ((width/2) - gCenter)*pix;
        printf("X: %f +- %f\n", X, fabs(error)/100.0*X);
        // goal center
        cvLine(img, cvPoint(gCenter, 0), cvPoint(gCenter, height), cvScalar(0, 0, 255, 0), 1, 0, 0);
        cvShowImage("img", img);
        cvShowImage("treshed", imgThreshed);
        //cvShowImage("graph", imgGraph);
        cvWaitKey(10);
    }
    return 0;
}
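/* allocateCvImage() above is a project-local helper that is not shown. A
   plausible sketch, assuming its job is to lazily (re)allocate the buffer so
   the per-frame loop does not leak (hypothetical implementation): */
static void allocateCvImage(IplImage **img, CvSize size, int depth, int channels)
{
    if (*img && ((*img)->width != size.width || (*img)->height != size.height)) {
        cvReleaseImage(img);              /* geometry changed: drop the old buffer */
    }
    if (*img == NULL) {
        *img = cvCreateImage(size, depth, channels);
    }
}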
int main()
{
    // Initialize capturing live feed from the camera
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(0);
    // Couldn't get a device? Throw an error and quit
    if(!capture) {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    // The two windows we'll be using
    cvNamedWindow("video");
    cvNamedWindow("thresh");
    // This image holds the "scribble" data...
    // the tracked positions of the ball
    IplImage* imgScribble = NULL;
    // An infinite loop
    while(true) {
        // Will hold a frame captured from the camera
        IplImage* frame = 0;
        frame = cvQueryFrame(capture);
        // If we couldn't grab a frame... quit
        if(!frame) break;
        // If this is the first frame, we need to initialize it
        if(imgScribble == NULL) {
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        }
        // Holds the yellow thresholded image (yellow = white, rest = black)
        IplImage* imgYellowThresh = GetThresholdedImage(frame);
        // Calculate the moments to estimate the position of the ball
        CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
        cvMoments(imgYellowThresh, moments, 1);
        // The actual moment values
        double moment10 = cvGetSpatialMoment(moments, 1, 0);
        double moment01 = cvGetSpatialMoment(moments, 0, 1);
        double area = cvGetCentralMoment(moments, 0, 0);
        // Holding the last and current ball positions
        static int posX = 0;
        static int posY = 0;
        int lastX = posX;
        int lastY = posY;
        // Avoid dividing by zero when nothing yellow is visible
        if(area > 0) {
            posX = moment10/area;
            posY = moment01/area;
        }
        // Print it out for debugging purposes
        printf("position (%d,%d)\n", posX, posY);
        // We want to draw a line only if it's a valid position
        if(lastX>0 && lastY>0 && posX>0 && posY>0) {
            // Draw a yellow line from the previous point to the current point
            cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
        }
        // Add the scribbling image and the frame... and we get a combination of the two
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh", imgYellowThresh);
        cvShowImage("video", frame);
        // Wait for a keypress
        int c = cvWaitKey(10);
        if(c!=-1) {
            // If pressed, break out of the loop
            break;
        }
        // Release the thresholded image... we need no memory leaks.. please
        cvReleaseImage(&imgYellowThresh);
        // moments came from malloc(), so release it with free(), not delete
        free(moments);
    }
    // We're done using the camera. Other applications can now use it
    cvReleaseCapture(&capture);
    return 0;
}
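/* GetThresholdedImage() is called above but not defined in this snippet. A
   minimal sketch, assuming it isolates yellow in HSV space (the exact bounds
   below are illustrative, not the author's values): */
IplImage* GetThresholdedImage(IplImage* img)
{
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);      /* threshold in HSV, not BGR */
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
    cvInRangeS(imgHSV, cvScalar(20, 100, 100, 0),
               cvScalar(30, 255, 255, 0), imgThreshed);
    cvReleaseImage(&imgHSV);
    return imgThreshed;                       /* the loop above releases this */
}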
int main (int argc, char *argv[]) { GtkBuilder *builder; GtkWidget *window, *window_paint, * fullscreen; GtkWidget *canvas, *canvas_screen; GdkGLConfig *gl_config; //we need to initialize all these functions so that gtk knows //to be thread-aware if (!g_thread_supported ()){ g_thread_init(NULL); } gdk_threads_init(); gdk_threads_enter(); gtk_init (&argc, &argv); //opengl // gtk_gl_init (&argc, &argv); // gl_config = gdk_gl_config_new_by_mode (GDK_GL_MODE_RGB | GDK_GL_MODE_ALPHA | GDK_GL_MODE_DEPTH | GDK_GL_MODE_DOUBLE); builder = gtk_builder_new (); gtk_builder_add_from_file (builder, "gui/settings.builder", NULL); window = GTK_WIDGET (gtk_builder_get_object (builder, "window")); canvas = GTK_WIDGET (gtk_builder_get_object (builder, "canvas")); window_paint = GTK_WIDGET (gtk_builder_get_object (builder, "windowscreen")); canvas_screen = GTK_WIDGET (gtk_builder_get_object (builder, "canvasscreen")); fullscreen = GTK_WIDGET (gtk_builder_get_object (builder, "fullscreen")); gtk_builder_connect_signals (builder, NULL); g_object_unref (G_OBJECT (builder)); //signals for event g_signal_connect (G_OBJECT (canvas), "expose-event", G_CALLBACK (paints_video), NULL ); g_signal_connect (G_OBJECT (canvas_screen), "expose-event", G_CALLBACK (paints_anime), NULL ); g_signal_connect(G_OBJECT(window), "destroy", G_CALLBACK(on_window_destroy), NULL ); //g_signal_connect(G_OBJECT(window_paint), "expose_event", G_CALLBACK(on_window_expose_event), NULL); // g_signal_connect(G_OBJECT(window_paint), "configure_event", G_CALLBACK(on_window_configure_event), NULL); g_signal_connect(G_OBJECT(fullscreen), "clicked", G_CALLBACK(click_fullcreen ), (gpointer) window_paint ); gtk_widget_set_size_request(canvas_screen,500,500); // opencv CvCapture * capture = cvCaptureFromCAM(-1); detec = new Detection(capture); //we can turn off gtk's automatic painting and double buffering routines. //gtk_widget_set_app_paintable(window_paint, TRUE); //gtk_widget_set_double_buffered(window_paint, FALSE); g_timeout_add(D_TIMER_ANIME, (GSourceFunc) timer_anime2, (gpointer) canvas_screen); g_timeout_add(D_TIMER_VIDEO, (GSourceFunc) timer_frame, (gpointer) canvas); gtk_widget_show (window_paint); gtk_widget_show(window); // time_handler(window); //pixmap = gdk_pixmap_new(window_paint->window,500,500,-1); gtk_main (); gdk_threads_leave(); return 0; }
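// timer_frame() and timer_anime2() are registered with g_timeout_add() above
// but are not shown. A plausible sketch for timer_frame, assuming it merely
// invalidates the canvas so the "expose-event" handler (paints_video) redraws
// it with a fresh camera frame (hypothetical):
static gboolean timer_frame(gpointer data)
{
    GtkWidget *canvas = GTK_WIDGET(data);
    gtk_widget_queue_draw(canvas);   /* schedules an expose-event on the canvas */
    return TRUE;                     /* keep the timeout installed */
}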
/** * @function main */ int main2() { CvCapture* capture; cv::Mat frame; // Load the cascades if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; }; cv::namedWindow(main_window_name,CV_WINDOW_NORMAL); cv::moveWindow(main_window_name, 400, 100); cv::namedWindow(face_window_name,CV_WINDOW_NORMAL); cv::moveWindow(face_window_name, 10, 100); cv::namedWindow("Right Eye",CV_WINDOW_NORMAL); cv::moveWindow("Right Eye", 10, 600); cv::namedWindow("Left Eye",CV_WINDOW_NORMAL); cv::moveWindow("Left Eye", 10, 800); createCornerKernels(); ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2), 43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1); // Read the video stream capture = cvCaptureFromCAM( -1 ); if( capture ) { while( true ) { frame = cvQueryFrame( capture ); // mirror it cv::flip(frame, frame, 1); frame.copyTo(debugImage); int index = -1; // Apply the classifier to the frame if( !frame.empty() ) { index =detectAndDisplay( frame ); } else { printf(" --(!) No captured frame -- Break!"); break; } if (index >0) { char temp[5]; sprintf(temp, "%d", index); string s(temp); Text(debugImage, "Index: "+s+" OK", 100, 100); } imshow(main_window_name,debugImage); int c = cv::waitKey(10); if( (char)c == 'c' ) { break; } if( (char)c == 'f' ) { imwrite("frame.png",frame); } } } releaseCornerKernels(); return 0; }
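// Text() above is a small project-local overlay helper that is not shown. A
// minimal sketch, assuming it wraps cv::putText at pixel position (x, y); the
// font, colour and thickness here are illustrative assumptions:
static void Text(cv::Mat &img, const std::string &msg, int x, int y)
{
    cv::putText(img, msg, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX,
                1.0, cv::Scalar(0, 255, 0), 2);   // green text, 2 px stroke
}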
void MainWindow::OpticalFlowDetect() { cvReleaseCapture(&pCapture); pCapture=cvCaptureFromCAM(0); int corner_count = 1000; CvTermCriteria criteria; criteria = cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 64, 0.01); IplImage *src_img1; IplImage *src_img2; IplImage *dst_img; IplImage *pre_img; IplImage *result; IplImage *eig_img; IplImage *temp_img; IplImage *prev_pyramid; IplImage *curr_pyramid; CvPoint2D32f *corners1; CvPoint2D32f *corners2; corners1 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f)); corners2 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f)); char *status; status = (char *) cvAlloc (corner_count); while (1) { pre_img = cvQueryFrame(pCapture); CvSize img_sz = cvGetSize(pre_img); src_img1 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1); cvCvtColor(pre_img, src_img1, CV_RGB2GRAY); dst_img = cvQueryFrame(pCapture); src_img2 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1); cvCvtColor(dst_img, src_img2, CV_RGB2GRAY); result=cvCreateImage(img_sz,IPL_DEPTH_8U,1); cvZero(result); eig_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1); temp_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1); prev_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1); curr_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1); cvGoodFeaturesToTrack (src_img1, eig_img, temp_img, corners1, &corner_count, 0.001, 5, NULL); cvCalcOpticalFlowPyrLK (src_img1, src_img2, prev_pyramid, curr_pyramid, corners1, corners2, corner_count, cvSize (10, 10), 4, status, NULL, criteria, 0); for (int i = 0; i < corner_count; i++) { if (status[i]) cvLine (dst_img, cvPointFrom32f (corners1[i]), cvPointFrom32f (corners2[i]), CV_RGB (255, 0, 0), 1, CV_AA, 0); } if(27==cvWaitKey(33)) break; // cvCvtScale(dst_img,result,1.0/255,0); MainWindow::Display(pre_img,src_img2,dst_img); } }
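/* A note on the pyramid buffers above: cvCalcOpticalFlowPyrLK() uses
   prev_pyramid/curr_pyramid as scratch storage for the image pyramids and
   requires at least (width + 8) * height / 3 bytes in each, which is why the
   classic examples allocate an 8-bit image of width+8 by height/3. The same
   sizing recipe appears in the other Lucas-Kanade snippets below. */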
int _tmain(int argc, char* argv[])
{
    CvCapture* pCapture = NULL;
    if (argc == 2) {
        char* _tempname = "e:\\201505280048_22.mp4";
        if (!(pCapture = cvCaptureFromFile(_tempname))) {
            fprintf(stderr, "Can not open video file %s\n", _tempname);
            return -2;
        }
    }
    if (argc == 1) {
        if (!(pCapture = cvCaptureFromCAM(1))) {
            fprintf(stderr, "Can not open camera.\n");
            return -2;
        }
    }
    IplImage* pFrame = NULL;
    int countx = 0;
    while ((pFrame = cvQueryFrame(pCapture))) {
        countx++;
        IplImage* img1 = cvCreateImage(cvGetSize(pFrame), IPL_DEPTH_8U, 1); // create the destination image
        cvCvtColor(pFrame, img1, CV_BGR2GRAY); // cvCvtColor(src, des, CV_BGR2GRAY)
        // edge detection
        cv::Mat result(img1);
        cv::Mat contours;
        cv::Canny(result, contours, 50, 150);
        // wrap the Canny output in a named header instead of taking the address
        // of a temporary, and release the grey copy so the loop does not leak
        cvReleaseImage(&img1);
        IplImage contoursHeader = contours;
        img1 = &contoursHeader;
        int nVer = 1;
        int nHor = 2;
        IplConvKernel* VerKer;
        IplConvKernel* HorKer;
        VerKer = cvCreateStructuringElementEx(1, nVer, 0, nVer/2, CV_SHAPE_RECT);
        HorKer = cvCreateStructuringElementEx(nHor, 1, nHor/2, 0, CV_SHAPE_RECT);
        cvDilate(img1, img1, VerKer);
        cvDilate(img1, img1, HorKer);
        cvMorphologyEx(img1, img1, NULL, NULL, CV_MOP_CLOSE);
        cvSaveImage("a.jpg", img1);
        cvReleaseStructuringElement(&VerKer);
        cvReleaseStructuringElement(&HorKer);
        cv::Mat image(pFrame);
        LineFinder finder;
        finder.setMinVote(600);
        finder.setLineLengthAndGap(680, 500);
        std::vector<cv::Vec4i> li;
        li = finder.findLines(contours);
        finder.drawDetectedLines(image);
        imwrite("123.jpg", image);
        // select the first line
        // black image
        // for(int i = 0; i < li.size(); i++)
        // {
        //     int n = i;
        //     cv::Mat oneLine(image.size(), CV_8U, cv::Scalar(0));
        //     cv::Mat oneLineInv;
        //     // white line
        //     line(oneLine, cv::Point(li[n][0], li[n][1]), cv::Point(li[n][2], li[n][3]), cv::Scalar(255), 5);
        //     // bitwise-AND the contours with the white line
        //     bitwise_and(contours, oneLine, oneLine);
        //     threshold(oneLine, oneLineInv, 128, 255, cv::THRESH_BINARY_INV);
        //     // push the points of the point set into a vector
        //     std::vector<cv::Point> points;
        //     // visit every pixel
        //     for(int y = 0; y < oneLine.rows; y++)
        //     {
        //         uchar* rowPtr = oneLine.ptr<uchar>(y);
        //         for(int x = 0; x < oneLine.cols; x++)
        //         {
        //             if(rowPtr[x])
        //             {
        //                 points.push_back(cv::Point(x, y));
        //             }
        //         }
        //     }
        //     // container that stores the fitted line
        //     cv::Vec4f line;
        //     // line-fitting function
        //     fitLine(cv::Mat(points), line, CV_DIST_L12, 0, 0.01, 0.01);
        //     // draw a line segment
        //     int x0 = line[2];
        //     int y0 = line[3];
        //     int x1 = x0 - 200*line[0];
        //     int y1 = y0 - 200*line[1];
        //     if(y0 == y1 /*|| x0 == x1*/)
        //     {
        //         cv::line(image, cv::Point(x0, y0), cv::Point(x1, y1), cv::Scalar(0, 255, 0), 1);
        //         imwrite("123.jpg", image);
        //     }
        // }
    }
    return 0;
}
void main(int argc,char *argv[]) { int c; IplImage* color_img; IplImage* hsv_img; IplImage* h_img; IplImage* gray; int flags = CV_WINDOW_AUTOSIZE; CvCapture* cv_cap = cvCaptureFromCAM(CAMERA_0); // Capture from CAMERA 0 int h = 180; int t1 = 3, t2 = 5; CvScalar min = CV_RGB(h-15,100,0); CvScalar max = CV_RGB(h+15,256,256); /* Create ellipse to despeckle hsv. */ IplConvKernel* ellipse = cvCreateStructuringElementEx(10, 10, 1, 1, CV_SHAPE_ELLIPSE, NULL); /* For X, Y, And Area */ CvMoments moments; /* For contours */ CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* contours = 0; double area, m01, m10; if (!cv_cap) goto fail; cvNamedWindow("Webcam Video", flags); // create window cvNamedWindow("hsv Video", flags); // create window cvNamedWindow("Contour Video", flags); // create window cvCreateTrackbar("Hue", "hsv Video", &h, 256, set_h); cvCreateTrackbar("countour1", "Contour Video", &t1, 256, set_h); cvCreateTrackbar("countour2", "Contour Video", &t2, 256, set_h); for(;;) { color_img = cvQueryFrame(cv_cap); // get frame if(color_img != 0) { hsv_img = cvCreateImage(cvGetSize(color_img), IPL_DEPTH_8U, 3); gray = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1); } cvCvtColor(color_img, gray, CV_RGB2GRAY); cvCvtColor(color_img, hsv_img, CV_BGR2HSV); h_img = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1); /* HSV */ min = CV_RGB(h-20,10,10); max = CV_RGB(h+20,256,256); /* Remove anything not in the hue range. */ cvInRangeS(hsv_img, min, max, h_img); /* Remove noise, or at least make the blotches bigger? */ cvErode(h_img, h_img, ellipse,1); cvDilate(h_img, h_img, ellipse,1); /* Calculate moments to figure out if the object is present */ cvMoments(h_img, &moments, 1); area = cvGetSpatialMoment(&moments, 0,0); m01 = cvGetSpatialMoment(&moments, 0,1); m10 = cvGetSpatialMoment(&moments, 1,0); if (area > 17000) { int x = m10/area; int y = m01/area; printf("x = %d, y = %d (area = %f)\n", x, y, area); } /* Draw contours */ cvCanny(gray, gray, (double)t1, (double)t2, 3); //cvDilate(gray, gray, 0, 1); cvDilate(gray, gray, ellipse, 1); cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_NONE, cvPoint(0,0)); cvDrawContours(color_img, contours, CV_RGB(254,0,0), CV_RGB(0,255,0), 10, 1, CV_AA, cvPoint(0,0)); /* Display images */ cvShowImage("hsv Video", h_img); // show frame cvShowImage("Contour Video", gray); // show frame cvShowImage("Webcam Video", color_img); // show frame c = cvWaitKey(KS_WAIT); // wait KS_WAIT ms or for key stroke if(c == 27) break; // if ESC, break and quit } /* clean up */ cvReleaseCapture( &cv_cap ); cvDestroyWindow("Webcam Video"); return; fail: printf("capture from cam failed\n"); }
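/* The cvErode()+cvDilate() pair above (same ellipse kernel, one iteration
   each) is a morphological opening; the despeckle step could equally be
   written as a single call, which should behave the same given the same
   structuring element: */
cvMorphologyEx(h_img, h_img, NULL, ellipse, CV_MOP_OPEN, 1);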
CameraControl * camera_control_new_with_settings(int cameraID, int width, int height, int framerate, int cam_type) { CameraControl* cc = (CameraControl*) calloc(1, sizeof(CameraControl)); cc->cameraID = cameraID; if (framerate <= 0) { framerate = PSMOVE_TRACKER_DEFAULT_FPS; } if (cam_type == PSMove_Camera_PS3EYE_BLUEDOT) { cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_BLUE; cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_BLUE; } else if (cam_type == PSMove_Camera_PS3EYE_REDDOT) { cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_RED; cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_RED; } else if (cam_type == PSMove_Camera_Unknown) { cc->focl_x = (float)PS3EYE_FOCAL_LENGTH_BLUE; cc->focl_y = (float)PS3EYE_FOCAL_LENGTH_BLUE; } // Needed for cbb tracker. Will be overwritten by camera calibration files if they exist. #if defined(CAMERA_CONTROL_USE_CL_DRIVER) // Windows 32-bit. Either CL_SDK or Registry_requiring int cams = CLEyeGetCameraCount(); if (cams <= cameraID) { free(cc); return NULL; } GUID cguid = CLEyeGetCameraUUID(cameraID); cc->camera = CLEyeCreateCamera(cguid, CLEYE_COLOR_PROCESSED, CLEYE_VGA, framerate); CLEyeCameraGetFrameDimensions(cc->camera, &width, &height); // Depending on color mode chosen, create the appropriate OpenCV image cc->frame4ch = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 4); cc->frame3ch = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3); CLEyeCameraStart(cc->camera); #elif defined(CAMERA_CONTROL_USE_PS3EYE_DRIVER) // Mac or Windows // Initialize PS3EYEDriver ps3eye_init(); int cams = ps3eye_count_connected(); psmove_DEBUG("Found %i ps3eye(s) with CAMERA_CONTROL_USE_PS3EYE_DRIVER.\n", cams); if (cams <= cameraID) { free(cc); return NULL; } if (width <= 0 || height <= 0) { get_metrics(&width, &height); } psmove_DEBUG("Attempting to open ps3eye with cameraId, width, height, framerate: %d, %d, %d, %d.\n", cameraID, width, height, framerate); cc->eye = ps3eye_open(cameraID, width, height, framerate); if (cc->eye == NULL) { psmove_WARNING("Failed to open camera ID %d", cameraID); free(cc); return NULL; } cc->framebgr = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3); #else // Assume webcam accessible from OpenCV. char *video = psmove_util_get_env_string(PSMOVE_TRACKER_FILENAME_ENV); if (video) { psmove_DEBUG("Using '%s' as video input.\n", video); cc->capture = cvCaptureFromFile(video); free(video); } else { cc->capture = cvCaptureFromCAM(cc->cameraID); if (width <= 0 || height <= 0) { get_metrics(&width, &height); } cvSetCaptureProperty(cc->capture, CV_CAP_PROP_FRAME_WIDTH, width); cvSetCaptureProperty(cc->capture, CV_CAP_PROP_FRAME_HEIGHT, height); } #endif cc->width = width; cc->height = height; cc->deinterlace = PSMove_False; return cc; }
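/* get_metrics() above supplies fallback capture dimensions when the caller
   passed width/height <= 0. A minimal sketch, assuming plain VGA defaults
   (hypothetical; the real PS Move API helper may consult environment
   variables or the driver instead): */
static void get_metrics(int *width, int *height)
{
    *width  = 640;   /* assumed default */
    *height = 480;   /* assumed default */
}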
void demo_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename) { #ifdef OPENCV printf("Classifier Demo\n"); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); list *options = read_data_cfg(datacfg); srand(2222222); CvCapture * cap; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } int top = option_find_int(options, "top", 1); char *name_list = option_find_str(options, "names", 0); char **names = get_labels(name_list); int *indexes = calloc(top, sizeof(int)); if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow("Classifier", CV_WINDOW_NORMAL); cvResizeWindow("Classifier", 512, 512); float fps = 0; int i; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); image in = get_image_from_stream(cap); image in_s = resize_image(in, net->w, net->h); show_image(in, "Classifier"); float *predictions = network_predict(net, in_s.data); if(net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1); top_predictions(net, top, indexes); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); for(i = 0; i < top; ++i){ int index = indexes[i]; printf("%.1f%%: %s\n", predictions[index]*100, names[index]); } free_image(in_s); free_image(in); cvWaitKey(10); gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } #endif }
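/* The update fps = .9*fps + .1*curr above is an exponential moving average:
   each new measurement contributes 10%, so a single noisy frame barely moves
   the displayed rate. For example, with fps = 30 and one outlier curr = 60:
       0.9*30 + 0.1*60 = 33, not 60,
   and fps decays back toward the true rate over the following frames. */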
int main() { int c = 0, i = 0; CvCapture* capture = cvCaptureFromCAM(0); if(!cvQueryFrame(capture)) { printf("Video capture failed, please check the camera."); } else { printf("Video camera capture status: OK"); } CvSize sz = cvGetSize(cvQueryFrame( capture)); height = sz.height; width = sz.width; step = sz.width; IplImage* src = cvCreateImage( sz, 8, 3 ); IplImage* hsv_image = cvCreateImage( sz, 8, 3); IplImage* hsv_mask = cvCreateImage( sz, 8, 1); IplImage* handview = cvCreateImage(sz, 8, 1); CvScalar hsv_min = cvScalar(5, 70, 0, 0); CvScalar hsv_max = cvScalar(20, 150, 255, 0); //H-> 0-20 while( c != 27) { //printf("%d\t\t",framecount); src = cvQueryFrame( capture); cvCvtColor(src, hsv_image, CV_BGR2HSV); cvInRangeS (hsv_image, hsv_min, hsv_max, hsv_mask); cvSmooth(hsv_mask, handview, CV_MEDIAN, 5, 0, 0, 0); cvDilate(handview, handview, NULL, 3); //cvDilate(hsv_mask, handview, NULL, 1); //cvErode(handview, handview, NULL, 1); //cvDilate(handview, handview, NULL, 1); CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* contour = 0; cvFindContours(handview, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE ); cvZero(handview); for( ; contour != 0; contour = contour->h_next ) { // replace CV_FILLED with 1 to see the outlines double area = cvContourArea( contour, CV_WHOLE_SEQ, 0); if(area > 500) { cvDrawContours( handview, contour, cvScalar( 255 ), cvScalar( 255 ), 0, 0, 8 ); //max = area; } } //cvShowImage("handview", handview); tips(handview); //cvNamedWindow( "hsv-msk",1); //cvShowImage( "hsv-msk", hsv_mask); //hsv_mask->origin = 1; for(i=0; i<tipcount; i++) { if(posmax == i) cvCircle(src, cvPoint(tips_position[posmax][1], tips_position[posmax][0]), 3, cvScalar(0,255,0), 2, 8, 0); else cvCircle(src, cvPoint(tips_position[i][1], tips_position[i][0]), 3, cvScalar(255,0,0), 2, 8, 0); if(speed[i][1] > 1 && speed[i][2] > 1 && (int)speed[i][5] == 1 && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == 1) { cvCircle(src, cvPoint(speed[i][4], speed[i][3]), 5, cvScalar(0,0,255), 3, 8, 0); speed[i][1] = 0; speed[i][2] = 0; speed[i][5] = 0; //printf("check\t"); } else if(speed[i][1] > 1 && speed[i][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][i][2] == -1) { //cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 9, cvScalar(0,0,255), 3, 8, 0); speed[i][1] = speed[i][2]; speed[i][2] = 0; speed[i][5] = 0; //printf("check\t"); } } //printf("%d\t%d\t%d", (int)speed[3][1], (int)speed[3][2], (int)speed[3][5]); //printf("\n"); /*if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == 1) { cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0); speed[posmax][1]=0; speed[posmax][2]=0; //printf("check\t"); } else if(speed[posmax][1] > 1 && speed[posmax][2] > 1 && oldtips[(oldtipflag+1)%FRAMERUN][posmax][2] == -1) { //cvCircle(src, cvPoint(speed[posmax][4], speed[posmax][3]), 5, cvScalar(0,0,255), 3, 8, 0); speed[posmax][1]=speed[posmax][2]; speed[posmax][2]=0; //printf("check\t"); }*/ //sprintf(framename, "./frames/frame%d.jpg", framecount++); //cvSaveImage(framename, src); //maxspeed = posmax; oldtipflag = (oldtipflag+1)%FRAMERUN; //printf("%d\t%f\t3\t%f\n", posmax, speed[posmax][0], speed[3][0]); cvNamedWindow( "src",1); cvShowImage( "src", src); c = cvWaitKey( 10); } cvReleaseCapture( &capture); cvReleaseImage(&hsv_image); cvReleaseImage(&hsv_mask); cvReleaseImage(&handview); cvDestroyAllWindows(); }
void gun_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename) { #ifdef OPENCV int bad_cats[] = {218, 539, 540, 1213, 1501, 1742, 1911, 2415, 4348, 19223, 368, 369, 370, 1133, 1200, 1306, 2122, 2301, 2537, 2823, 3179, 3596, 3639, 4489, 5107, 5140, 5289, 6240, 6631, 6762, 7048, 7171, 7969, 7984, 7989, 8824, 8927, 9915, 10270, 10448, 13401, 15205, 18358, 18894, 18895, 19249, 19697}; printf("Classifier Demo\n"); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); list *options = read_data_cfg(datacfg); srand(2222222); CvCapture * cap; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } int top = option_find_int(options, "top", 1); char *name_list = option_find_str(options, "names", 0); char **names = get_labels(name_list); int *indexes = calloc(top, sizeof(int)); if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow("Threat Detection", CV_WINDOW_NORMAL); cvResizeWindow("Threat Detection", 512, 512); float fps = 0; int i; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); image in = get_image_from_stream(cap); image in_s = resize_image(in, net->w, net->h); show_image(in, "Threat Detection"); float *predictions = network_predict(net, in_s.data); top_predictions(net, top, indexes); printf("\033[2J"); printf("\033[1;1H"); int threat = 0; for(i = 0; i < sizeof(bad_cats)/sizeof(bad_cats[0]); ++i){ int index = bad_cats[i]; if(predictions[index] > .01){ printf("Threat Detected!\n"); threat = 1; break; } } if(!threat) printf("Scanning...\n"); for(i = 0; i < sizeof(bad_cats)/sizeof(bad_cats[0]); ++i){ int index = bad_cats[i]; if(predictions[index] > .01){ printf("%s\n", names[index]); } } free_image(in_s); free_image(in); cvWaitKey(10); gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } #endif }
void threat_classifier(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename) { #ifdef OPENCV float threat = 0; float roll = .2; printf("Classifier Demo\n"); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); list *options = read_data_cfg(datacfg); srand(2222222); CvCapture * cap; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } int top = option_find_int(options, "top", 1); char *name_list = option_find_str(options, "names", 0); char **names = get_labels(name_list); int *indexes = calloc(top, sizeof(int)); if(!cap) error("Couldn't connect to webcam.\n"); //cvNamedWindow("Threat", CV_WINDOW_NORMAL); //cvResizeWindow("Threat", 512, 512); float fps = 0; int i; int count = 0; while(1){ ++count; struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); image in = get_image_from_stream(cap); if(!in.data) break; image in_s = resize_image(in, net->w, net->h); image out = in; int x1 = out.w / 20; int y1 = out.h / 20; int x2 = 2*x1; int y2 = out.h - out.h/20; int border = .01*out.h; int h = y2 - y1 - 2*border; int w = x2 - x1 - 2*border; float *predictions = network_predict(net, in_s.data); float curr_threat = 0; if(1){ curr_threat = predictions[0] * 0 + predictions[1] * .6 + predictions[2]; } else { curr_threat = predictions[218] + predictions[539] + predictions[540] + predictions[368] + predictions[369] + predictions[370]; } threat = roll * curr_threat + (1-roll) * threat; draw_box_width(out, x2 + border, y1 + .02*h, x2 + .5 * w, y1 + .02*h + border, border, 0,0,0); if(threat > .97) { draw_box_width(out, x2 + .5 * w + border, y1 + .02*h - 2*border, x2 + .5 * w + 6*border, y1 + .02*h + 3*border, 3*border, 1,0,0); } draw_box_width(out, x2 + .5 * w + border, y1 + .02*h - 2*border, x2 + .5 * w + 6*border, y1 + .02*h + 3*border, .5*border, 0,0,0); draw_box_width(out, x2 + border, y1 + .42*h, x2 + .5 * w, y1 + .42*h + border, border, 0,0,0); if(threat > .57) { draw_box_width(out, x2 + .5 * w + border, y1 + .42*h - 2*border, x2 + .5 * w + 6*border, y1 + .42*h + 3*border, 3*border, 1,1,0); } draw_box_width(out, x2 + .5 * w + border, y1 + .42*h - 2*border, x2 + .5 * w + 6*border, y1 + .42*h + 3*border, .5*border, 0,0,0); draw_box_width(out, x1, y1, x2, y2, border, 0,0,0); for(i = 0; i < threat * h ; ++i){ float ratio = (float) i / h; float r = (ratio < .5) ? (2*(ratio)) : 1; float g = (ratio < .5) ? 1 : 1 - 2*(ratio - .5); draw_box_width(out, x1 + border, y2 - border - i, x2 - border, y2 - border - i, 1, r, g, 0); } top_predictions(net, top, indexes); char buff[256]; sprintf(buff, "/home/pjreddie/tmp/threat_%06d", count); //save_image(out, buff); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); for(i = 0; i < top; ++i){ int index = indexes[i]; printf("%.1f%%: %s\n", predictions[index]*100, names[index]); } if(1){ show_image(out, "Threat"); cvWaitKey(10); } free_image(in_s); free_image(in); gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } #endif }
int main(){ CvCapture* capture =0; capture = cvCaptureFromCAM(0); if(!capture){ printf("Capture failure\n"); return -1; } IplImage* frame=0; frame = cvQueryFrame(capture); if(!frame) return -1; //create a blank image and assigned to 'imgTracking' which has the same size of original video imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3); cvZero(imgTracking); //covert the image, 'imgTracking' to black cvNamedWindow("Video"); cvNamedWindow("Ball"); //iterate through each frames of the video while(true){ frame = cvQueryFrame(capture); if(!frame) break; frame=cvCloneImage(frame); cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3); cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV IplImage* imgThresh = GetThresholdedImage(imgHSV, 150, 127, 134, 193, 177, 170); cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel trackObject(imgThresh, 255, 0, 0, 1); IplImage* imgHSV2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3); cvCvtColor(frame, imgHSV2, CV_BGR2HSV); IplImage* imgThresh2 = GetThresholdedImage(imgHSV2, 37, 67, 135, 113, 115, 202); cvSmooth(imgThresh2, imgThresh2, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel trackObject(imgThresh2, 0, 255, 0, 2); // Add the tracking image and the frame cvAdd(frame, imgTracking, frame); cvShowImage("Ball", imgThresh); cvShowImage("Ball2", imgThresh2); cvShowImage("Video", frame); //Clean up used images cvReleaseImage(&imgHSV); cvReleaseImage(&imgHSV2); cvReleaseImage(&imgThresh); cvReleaseImage(&imgThresh2); cvReleaseImage(&frame); //Wait 10mS int c = cvWaitKey(10); //If 'ESC' is pressed, break the loop if((char)c==27 ) break; } cvDestroyAllWindows() ; cvReleaseImage(&imgTracking); cvReleaseCapture(&capture); return 0; }
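// GetThresholdedImage() is called above with six threshold values but its
// body is not shown. A sketch that matches the call sites, assuming the
// argument order (lowH, lowS, lowV, highH, highS, highV) -- an inference,
// since both calls keep each low value below its high counterpart:
IplImage* GetThresholdedImage(IplImage* imgHSV, int lowH, int lowS, int lowV,
                              int highH, int highS, int highV)
{
    IplImage* imgThresh = cvCreateImage(cvGetSize(imgHSV), IPL_DEPTH_8U, 1);
    cvInRangeS(imgHSV, cvScalar(lowH, lowS, lowV, 0),
               cvScalar(highH, highS, highV, 0), imgThresh);
    return imgThresh;   /* caller releases it (the loop above does) */
}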
int main() { CvCapture* capture = 0; printf("Initializing Capture...\n"); // capture = cvCreateFileCapture("http://192.168.1.100/videostream.cgi?user=admin&pwd=123456"); capture = cvCaptureFromCAM( CV_CAP_ANY ); printf("Camera found\n"); if(!capture){printf("Could not initialize camera...\n");} printf("Capture Success!!!\n"); cvNamedWindow("Video", CV_WINDOW_AUTOSIZE); cvResizeWindow("Video", 640,480); cvMoveWindow("Video", 0,100); cvNamedWindow("HSV",CV_WINDOW_AUTOSIZE); cvNamedWindow("ROI"); cvMoveWindow("ROI", 700,100); int state = 0; int track = 1; int change= 2; int reset = 3; int clear = 4; while(1) { // Hold frame captured from camera IplImage* img = cvQueryFrame(capture); if(!img){printf("Image Query Failed");break;} key = cvWaitKey(10); if(key=='t'){state = track;} if(key=='c'){state = change;} if(key=='r'){state = reset;} if(key=='q'){state = clear;} key = cvWaitKey(10); if(state == track) { printf("Tracking\n"); imgRed = getThresholdImage(img); imgPos = getMoment(img); } key = cvWaitKey(10); if(state == change) { cvRectangle(img,cvPoint(250,300),cvPoint(400,200),CV_RGB(255,0,0),5,8); cvWaitKey(30); cvShowImage("Video",img); //printf("Getting Color\n"); IplImage* imgColor = getColor(img); state = 0; } key = cvWaitKey(10); if(state == reset) { printf("Resetting\n"); minC = hsv_min; maxC = hsv_max; state = 0; } if(state == clear) { printf("Paused\n"); state = 0; } else { key = cvWaitKey(10); } cvShowImage("HSV", imgRed); cvShowImage("Video", img); cvReleaseImage(&imgPos); cvReleaseImage(&imgRed); if ( (cvWaitKey(10) & 255) == 27 ) break; //key = cvWaitKey(10); } cvDestroyWindow("Video"); cvDestroyWindow("HSV"); cvDestroyWindow("ROI"); cvReleaseCapture(&capture); }
int main(int argc, char** argv){ // Set up variables //set the camera capture capture = cvCaptureFromCAM(0); // create a new window with auto sizing cvNamedWindow(window_name, 1); // create a window //cvNamedWindow("HSV Window", CV_WINDOW_AUTOSIZE); // create a window //cvNamedWindow("Grey Window", CV_WINDOW_AUTOSIZE); // create a window cvNamedWindow("Target", CV_WINDOW_AUTOSIZE); cvNamedWindow("Target2", CV_WINDOW_AUTOSIZE); cvNamedWindow("Before", CV_WINDOW_AUTOSIZE); cvNamedWindow("First", CV_WINDOW_AUTOSIZE); cvNamedWindow("Second", CV_WINDOW_AUTOSIZE); cvNamedWindow("Before2", CV_WINDOW_AUTOSIZE); cvNamedWindow("Third", CV_WINDOW_AUTOSIZE); while(quit != 'q'){ //grab frame from webcam g_image = cvQueryFrame(capture); // display frame cvShowImage(window_name,g_image); on_trackbar(0); //cvShowImage("HSV Window", g_gray ); //cvShowImage("Grey Window", g_grey ); somethingNew(); cvShowImage("Target", g_grey ); cvShowImage("Target2", g_gray ); /*something(); cvShowImage("blobOut", g_grey );*/ /*printf("Thresh is %d\n",new_thresh++); if(new_thresh == 255){ new_thresh = 0; }*/ //check for q to quit quit = cvWaitKey(1); // delay for a moment, delay is under 2 it doesn't seem to work cvWaitKey(DELAY); } // before quitting it releases memory cvReleaseCapture(&capture); cvDestroyWindow(window_name); return 0; }
void MainWindow::OpticalFlow() { cvReleaseCapture(&pCapture); // pCapture=cvCaptureFromCAM(0); //use webcam CvCapture* cam = cvCaptureFromCAM( 0 ) ; while(1) { //get a color image IplImage* frame = cvQueryFrame(cam) ; CvSize img_sz = cvGetSize(frame); const int win_size = 10 ; //convert the image to grey image IplImage* frame_prev = cvQueryFrame(cam) ; IplImage* img_prev = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ; cvCvtColor( frame_prev,img_prev ,CV_BGR2GRAY); //convert the image to grey image IplImage* frame_cur = cvQueryFrame(cam) ; IplImage* img_curr = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ; cvCvtColor( frame_cur,img_curr ,CV_BGR2GRAY); //create a imge to display result IplImage* img_res = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ; for ( int y = 0 ; y < img_sz.height ; ++y ) { uchar* ptr = (uchar*)( img_res->imageData + y * img_res->widthStep ) ; for ( int x = 0 ; x <img_res->width; ++x ) { ptr[x] = 255 ; } } //get good features IplImage* img_eig = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ; IplImage* img_temp = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ; int corner_count = MAX_CORNERS ; CvPoint2D32f* features_prev = new CvPoint2D32f[MAX_CORNERS] ; cvGoodFeaturesToTrack( img_prev, img_eig, img_temp, features_prev, &corner_count, 0.01, 5.0, 0, 3, 0, 0.4 ); cvFindCornerSubPix( img_prev, features_prev, corner_count, cvSize(win_size,win_size), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.03) ); // L-K char feature_found[ MAX_CORNERS ] ; float feature_errors[ MAX_CORNERS ] ; CvSize pyr_sz = cvSize( frame->width + 8 ,frame->height / 3 ) ; IplImage* pyr_prev = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ; IplImage* pyr_cur = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ; CvPoint2D32f* features_cur = new CvPoint2D32f[ MAX_CORNERS ] ; cvCalcOpticalFlowPyrLK( img_prev, img_curr, pyr_prev, pyr_cur, features_prev, features_cur, corner_count, cvSize(win_size,win_size), 5, feature_found, feature_errors, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.3), 0 ); for ( int i = 0 ; i < corner_count ; i++) { if ( 0 == feature_found[i] || feature_errors[i] > 550 ) { // printf("error is %f \n" , feature_errors[i] ) ; continue ; } // printf("find it !\n") ; CvPoint pt_prev = cvPoint( features_prev[i].x , features_prev[i].y ) ; CvPoint pt_cur = cvPoint( features_cur[i].x , features_cur[i].y ) ; cvLine( img_res,pt_prev,pt_cur,CV_RGB( 255,0,0),2 ); } if(27==cvWaitKey(33)) break; MainWindow::Display(frame_cur,img_curr,img_res); cvReleaseImage(&img_curr); cvReleaseImage(&img_eig); cvReleaseImage(&img_prev); cvReleaseImage(&img_res); cvReleaseImage(&img_temp); } }
// Main function, defines the entry point for the program. int main( int argc, char** argv ) { // Structure for getting video from camera or avi CvCapture* capture = 0; // Images to capture the frame from video or camera or from file IplImage *frame, *frame_copy = 0; // Used for calculations int optlen = strlen("--cascade="); // Input file name for avi or image file. const char* input_name; // Check for the correct usage of the command line if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 ) { cascade_name = argv[1] + optlen; input_name = argc > 2 ? argv[2] : 0; } else { fprintf( stderr, "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" ); return -1; /*input_name = argc > 1 ? argv[1] : 0;*/ } // Load the HaarClassifierCascade cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); // Check whether the cascade has loaded successfully. Else report and error and quit if( !cascade ) { fprintf( stderr, "ERROR: Could not load classifier cascade\n" ); return -1; } // Allocate the memory storage storage = cvCreateMemStorage(0); // Find whether to detect the object from file or from camera. if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') ) capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' ); else capture = cvCaptureFromAVI( input_name ); // Create a new named window with title: result cvNamedWindow( "result", 1 ); // Find if the capture is loaded successfully or not. // If loaded succesfully, then: if( capture ) { // Capture from the camera. for(int i=0;;i++) { // Capture the frame and load it in IplImage if( !cvGrabFrame( capture )) break; frame = cvRetrieveFrame( capture ); // If the frame does not exist, quit the loop if( !frame ) break; // Allocate framecopy as the same size of the frame if( !frame_copy ) frame_copy = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_8U, frame->nChannels ); // Check the origin of image. If top left, copy the image frame to frame_copy. if( frame->origin == IPL_ORIGIN_TL ) cvCopy( frame, frame_copy, 0 ); // Else flip and copy the image else cvFlip( frame, frame_copy, 0 ); // Call the function to detect and draw the face char fname[50]; sprintf(fname, "camera_stream%05d.jpg", i); detect_and_draw( frame_copy, fname ); // Wait for a while before proceeding to the next frame if( cvWaitKey( 10 ) >= 0 ) break; } // Release the images, and capture memory cvReleaseImage( &frame_copy ); cvReleaseCapture( &capture ); } // If the capture is not loaded succesfully, then: else { // Assume the image to be lena.jpg, or the input_name specified const char* filename = input_name ? 
input_name : (char*)"lena.jpg"; // Load the image from that filename IplImage* image = cvLoadImage( filename, 1 ); // If Image is loaded succesfully, then: if( image ) { // Detect and draw the face detect_and_draw( image, filename ); // Wait for user input cvWaitKey(0); // Release the image memory cvReleaseImage( &image ); } else { /* assume it is a text file containing the list of the image filenames to be processed - one per line */ FILE* f = fopen( filename, "rt" ); if( f ) { char buf[1000+1]; // Get the line from the file while( fgets( buf, 1000, f ) ) { // Remove the spaces if any, and clean up the name int len = (int)strlen(buf); while( len > 0 && isspace(buf[len-1]) ) len--; buf[len] = '\0'; // Load the image from the filename present in the buffer image = cvLoadImage( buf, 1 ); // If the image was loaded succesfully, then: if( image ) { // Detect and draw the face from the image detect_and_draw( image, buf ); // Wait for the user input, and release the memory //cvWaitKey(0); cvReleaseImage( &image ); } } // Close the file fclose(f); } } } // Destroy the window previously created with filename: "result" cvDestroyWindow("result"); printf("Done.\n"); // return 0 to indicate successfull execution of the program return 0; }
void MainWindow::OpencvOpticalFlow()
{
    cvReleaseCapture(&pCapture);
    pCapture = cvCaptureFromCAM(0);
    IplImage* pre;
    IplImage* next;
    int corner_count = MAX_CORNERS;
    while(1)
    {
        // grab two consecutive frames
        pre = cvQueryFrame(pCapture);
        next = cvQueryFrame(pCapture);
        CvSize img_sz = cvGetSize(pre);
        IplImage* imgC = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
        // grayscale copies of the two frames
        IplImage* imgA = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
        IplImage* imgB = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
        cvCvtColor(pre, imgA, CV_BGR2GRAY);
        cvCvtColor(next, imgB, CV_BGR2GRAY);
        // find good features to track in the first frame
        IplImage* eig_image = cvCreateImage(img_sz, IPL_DEPTH_32F, 1);
        IplImage* tmp_image = cvCreateImage(img_sz, IPL_DEPTH_32F, 1);
        CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
        cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
                               0.01, 5.0, 0, 3, 0, 0.04 );
        cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize(win_size,win_size),
                            cvSize(-1,-1),
                            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03) );
        char features_found[ MAX_CORNERS ];
        float feature_errors[ MAX_CORNERS ];
        // pyramid buffers for the pyramidal Lucas-Kanade tracker
        CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
        IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
        cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB,
                                corner_count, cvSize( win_size,win_size ), 5,
                                features_found, feature_errors,
                                cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ), 0 );
        for( int i=0; i<corner_count; i++ )
        {
            if( features_found[i]==0 || feature_errors[i]>550 )
            {
                //printf("Error is %f\n", feature_errors[i]);
                continue;
            }
            //printf("Got it\n");
            // draw the motion of each tracked feature as a line from A to B
            CvPoint p0 = cvPoint( cvRound( cornersA[i].x ), cvRound( cornersA[i].y ) );
            CvPoint p1 = cvPoint( cvRound( cornersB[i].x ), cvRound( cornersB[i].y ) );
            cvLine( imgC, p0, p1, CV_RGB(255,0,0), 2 );
        }
        if(27==cvWaitKey(33))
            break;
        MainWindow::Display(imgA,imgB,imgC);
        // NOTE: the images and corner arrays allocated above are never released,
        // so this loop leaks memory on every frame (see the sketch below).
    }
}
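The loop above allocates seven images and two corner arrays per frame and never frees them. A minimal cleanup sketch for the bottom of the loop, assuming Display() copies what it needs before returning:

/* Hedged sketch: release the per-iteration allocations at the end of each pass. */
cvReleaseImage(&imgC);
cvReleaseImage(&imgA);
cvReleaseImage(&imgB);
cvReleaseImage(&eig_image);
cvReleaseImage(&tmp_image);
cvReleaseImage(&pyrA);
cvReleaseImage(&pyrB);
delete [] cornersA;
delete [] cornersB;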
int main( int argc, char** argv )
{
    /*
     * serial stuff
     */
    int fd = 0;
    char serialport[256];
    int baudrate = B19200;  //FIXME
    fd = serialport_init("/dev/ttyACM0", baudrate);
    if(fd==-1) return -1;
    usleep(3000 * 1000);

    ///////////////////////////////////////////////////////
    int c = 0, fps = 0;

    //capture from camera
    CvCapture *capture = cvCaptureFromCAM(1);
    //quit if camera not found
    if(!capture)
    {
        printf("cannot init capture!\n");
        return -1;
    }

    //display original video stream
    cvNamedWindow("stream", CV_WINDOW_NORMAL);
    cvResizeWindow("stream", 320, 240);
    cvNamedWindow("hue", CV_WINDOW_NORMAL);
    cvResizeWindow("hue", 320, 240);
    cvMoveWindow("hue", 320, 0);

    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 2, CV_AA);

    //keep capturing frames until escape (27 is the escape key)
    while(c != 27)
    {
        //quit if we can't grab a frame
        if(!(frame = cvQueryFrame(capture)))
            break;

        //show the default image
        //cvShowImage("stream", frame);

        //edge detection - todo: HSV color filtering
        findLine(frame);
        //edge detection
        findEdge(frame);
        if(flag == 2)
            cvPutText(frame, "right edge", cvPoint(30, 400), &font, cvScalar(255, 0, 0, 0));
        else if(flag == 1)
            cvPutText(frame, "left edge", cvPoint(30, 400), &font, cvScalar(255, 0, 0, 0));

        //display center of gravity coordinates
        COG(&cog_x, &cog_y);
        char x_coord[10];
        char y_coord[10];
        sprintf(x_coord, "%1.2f", cog_x);
        sprintf(y_coord, "%1.2f", cog_y);

        //find center of y coordinate in left and right edge
        COG_edges(&left_y, &right_y);
        //printf("%1.2f\n", left_y);
        //printf("%1.2f\n", right_y);

        //find slant
        findSlant(frame);
        char write_slant[15];
        if(slant && offset)
            sprintf(write_slant, "%s", "crooked slant!"); //eventually invokes s movement
        else
            sprintf(write_slant, "%s", "\0");

        //TODO: FIXME
        //motor1 is closer to the bottom, motor2 is closer to the top (in starting config)
        uint8_t b = 0b00000000;
        uint8_t rb = 0;

        //motor logic
        char motor1[10];
        char motor2[10];

        //handles any errors - move 0.5 second for now
        //might have to flip these
        //left, right -> looking from the top of the roof down
        if(0) //flag == 2) edge on right
        {
            b = 0b00000111;
            sprintf(motor1, "%s", "backward");
            sprintf(motor2, "%s", "backward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        else if(0) //flag == 1) edge on left
        {
            b = 0b00000100;
            sprintf(motor1, "%s", "forward");
            sprintf(motor2, "%s", "forward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        else if(0) //ROR) rotate right (eventually look at angle and change timing appropriately)
        {
            printf("%s\n", "ROTATE RIGHT");
            b = 0b00000101;
            sprintf(motor1, "%s", "forward");
            sprintf(motor2, "%s", "backward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        else if(0) //ROL) rotate left (eventually look at angle and change timing appropriately)
        {
            printf("%s\n", "ROTATE LEFT");
            b = 0b00000110;
            sprintf(motor1, "%s", "backward");
            sprintf(motor2, "%s", "forward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        /*
        else if(MOVEUP) //move up, s motion
        {
            printf("%s\n", "MOVE UP");
            //rotate left wheel backward - S
            b = 0b10000010;
            sprintf(motor1, "%s", "backward");
            sprintf(motor2, "%s", "nothing!");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
            //rotate right wheel backward - S
            b = 0b10000001;
            sprintf(motor1, "%s", "nothing!");
            sprintf(motor2, "%s", "backward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
            //move forward again
            b = 0b01000000;
            sprintf(motor1, "%s", "forward");
            sprintf(motor2, "%s", "forward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        else if(MOVEDOWN) //move down, s motion
        {
            printf("%s\n", "MOVE DOWN");
            //rotate right wheel backward - S
            b = 0b10000001;
            sprintf(motor1, "%s", "nothing!");
            sprintf(motor2, "%s", "backward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
            //rotate left wheel backward - S
            b = 0b10000010;
            sprintf(motor1, "%s", "backward");
            sprintf(motor2, "%s", "nothing!");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
            //move forward again
            b = 0b01000000;
            sprintf(motor1, "%s", "forward");
            sprintf(motor2, "%s", "forward");
            write(fd,&b,1);
            while(read(fd,&rb,1) != 1)
                ;
        }
        */
        //NO ERROR - THIS HANDLES THE LOGIC TO DRIVE THE ROBOT IN THE CORRECT SEQUENCE
        //determines the amount of time and the directions in which to move the motors.
        //the arduino is responsible for sending the stop command to the wheels!
        else
        {
            /*
            switch(count)
            {
            //first shingle - already in place
            case 0:
                b = 0b00000000; //do nothing
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                printf("%d\n", rb);
                sleep(6); //however long it takes to place a shingle, or wait until the arduino says go
                printf("%s\n", "case 0");
                break;
            //second/third shingle - drive forward one foot (for now 2 secs, figure this out!)
            case 1:
            case 2:
                b = 0b01000000; //forward 1 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(3);
                printf("%s\n", "case 1/2");
                break;
            //left (viewing from bottom) overhang
            case 3:
                b = 0b00100011; //backward 1/2 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00110001; //ROR 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00100011; //backward 1/2 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(12);
                printf("%s\n", "case 3");
                break;
            //right overhang
            case 4:
                b = 0b00110001; //ROR 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b01000000; //forward 1 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00110001; //ROR 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(14);
                printf("%s\n", "case 4");
                break;
            //next to right overhang
            case 5:
                b = 0b01000011; //backward 1 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00110001; //ROR 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(8);
                break;
            //next to left overhang
            case 6:
                b = 0b01000000; //forward 1 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(6);
                break;
            //reposition at beginning position
            case 7:
                b = 0b00110001; //ROR 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00100000; //forward 1/2 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b00110010; //ROL 90 degrees
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                b = 0b01100011; //backward 1+1/2 foot
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(14);
                break;
            //should not get here
            default:
                b = 0; //error
                write(fd,&b,1);
                while(read(fd,&rb,1) != 1)
                    ;
                //sleep(20);
                break;
            }
            */
            write(fd, &count, 1);
            while(read(fd, &rb, 1) != 1)
                ;
            count = (count+1)%8;  //FIXME
        }

        //cvPutText(frame, x_coord, cvPoint(30,300), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, "y:", cvPoint(0,350), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, y_coord, cvPoint(30,350), &font, cvScalar(255, 0, 0, 0));
        //cvPutText(frame, "motor1:", cvPoint(0,150), &font, cvScalar(255, 0, 0, 0));
        //cvPutText(frame, motor1, cvPoint(150,150), &font, cvScalar(255, 0, 0, 0));
        //cvPutText(frame, "motor2:", cvPoint(0,200), &font, cvScalar(255, 0, 0, 0));
        //cvPutText(frame, motor2, cvPoint(150,200), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, write_slant, cvPoint(0,50), &font, cvScalar(255, 0, 0, 0));
        cvShowImage("stream", frame);
        c = cvWaitKey(10);

        //release images
        //cvReleaseImage(&image);
        //cvReleaseImage(&edge);
        //cvReleaseImage(&final);
        //cvReleaseImage(&frame);
        //cvReleaseImage(&bw);
        //cvReleaseImage(&gray);
    }

    //release created images
    cvReleaseCapture(&capture);
    return 0;
}
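serialport_init() is called above but not defined in this listing. A minimal POSIX termios sketch of such a helper (raw 8N1 mode; the timeout values are illustrative assumptions):

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

/* Hedged sketch: open 'device' raw at the given baud constant (e.g. B19200); returns fd or -1. */
int serialport_init(const char *device, speed_t baud)
{
    int fd = open(device, O_RDWR | O_NOCTTY);
    if (fd == -1)
        return -1;
    struct termios tty;
    if (tcgetattr(fd, &tty) != 0) { close(fd); return -1; }
    cfsetispeed(&tty, baud);
    cfsetospeed(&tty, baud);
    cfmakeraw(&tty);          // raw bytes: no echo, no line buffering, 8-bit chars
    tty.c_cc[VMIN] = 0;       // read() returns as soon as data arrives...
    tty.c_cc[VTIME] = 20;     // ...or after a 2-second timeout (assumed values)
    if (tcsetattr(fd, TCSANOW, &tty) != 0) { close(fd); return -1; }
    return fd;
}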
int main()
{
    CvCapture *camera = 0;
    IplImage *frame = 0, *image = 0;
    int i, j, k, n = 0;
    char filename[256];
    char c;

    camera = cvCaptureFromCAM(CV_CAP_ANY);
    if (!camera)
    {
        fprintf(stderr, "Can't initialize camera\n");
        return -1;
    }

    cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("video", 150, 200);

    for (i = 0; i < 6000; i++)
    {
        image = cvQueryFrame(camera);
        if (!image)
            printf("Capture failed ");
        else
            frame = cvCloneImage(image);  // note: the previous clone is never released, so this leaks per frame

        c = cvWaitKey(100);
        if (c > 0)
            break;

        if (frame)
        {
            // Blank out a 3x3 neighbourhood in the clone around every dark pixel
            // (all three BGR channels below 70) found in the captured image.
            for (j = 0; j < frame->height; j++)
            {
                for (k = 0; k < frame->width; k++)
                {
                    if ((uchar)(image->imageData + j*image->widthStep)[k*image->nChannels + 0] < 70 &&
                        (uchar)(image->imageData + j*image->widthStep)[k*image->nChannels + 1] < 70 &&
                        (uchar)(image->imageData + j*image->widthStep)[k*image->nChannels + 2] < 70)
                    {
                        if (k > 0 && k < frame->width - 1 && j > 0 && j < frame->height - 1)
                        {
                            for (int w = 0; w <= 2; w++)
                            {
                                (frame->imageData + (j - 1)*frame->widthStep)[(k + 1)*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + (j - 1)*frame->widthStep)[k*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + (j - 1)*frame->widthStep)[(k - 1)*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + j*frame->widthStep)[(k + 1)*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + j*frame->widthStep)[k*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + j*frame->widthStep)[(k - 1)*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + (j + 1)*frame->widthStep)[(k + 1)*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + (j + 1)*frame->widthStep)[k*frame->nChannels + w] = (uchar)0;
                                (frame->imageData + (j + 1)*frame->widthStep)[(k - 1)*frame->nChannels + w] = (uchar)0;
                            }
                        }
                    }
                }
            }
        }
        cvShowImage("video", frame);
    }

    cvReleaseCapture(&camera);
    cvWaitKey(0);
    return 0;
}
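The nine explicit writes above blank a 3x3 block one channel at a time; the same operation reads more clearly as a small helper (a sketch; the caller remains responsible for the bounds check, as the loop above already performs it):

/* Hedged sketch: zero a 3x3 BGR neighbourhood centred on (row, col). */
static void blank3x3(IplImage *img, int row, int col)
{
    for (int dj = -1; dj <= 1; dj++)
        for (int dk = -1; dk <= 1; dk++)
            for (int w = 0; w < 3; w++)
                (img->imageData + (row + dj)*img->widthStep)[(col + dk)*img->nChannels + w] = (uchar)0;
}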
int main( int argc, char** argv )
{
    contadorBlue = 0;
    contadorGreen = 0;
    contadorRed = 0;
    CvCapture *capture = NULL;
    IplImage *frame = NULL;
    IplImage *result = NULL;
    int key = 0;  // initialised; the original left this uninitialised before the while( key != 'q' ) test below
    char *filename = (char*)"aGest.xml";

    /* load the classifier
       note that I put the file in the same directory with this code */
    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
    /* setup memory buffer; needed by the face detector */
    storage = cvCreateMemStorage( 0 );
    /* initialize camera */
    capture = cvCaptureFromCAM( 0 );
    /* always check */
    assert( cascade && storage && capture );

    /* open and resize the images to be overlaid */
    IplImage *drumblue = cvLoadImage("./Drums/DrumBlue.png");
    IplImage *drumgreen = cvLoadImage("./Drums/DrumGreen.png");
    IplImage *drumred = cvLoadImage("./Drums/DrumRed.png");
    IplImage *lineblue = cvLoadImage("./Drums/BlueLine.png");
    IplImage *linegreen = cvLoadImage("./Drums/GreenLine.png");
    IplImage *linered = cvLoadImage("./Drums/RedLine.png");
    IplImage *step1 = cvLoadImage("./Drums/Step.png");
    IplImage *step2 = cvLoadImage("./Drums/Step2.png");
    IplImage *arrow1 = cvLoadImage("./Drums/Arrow1.png");
    IplImage *arrow2 = cvLoadImage("./Drums/Arrow2.png");
    IplImage *bien = cvLoadImage("./Drums/Bien.png");
    IplImage *buu = cvLoadImage("./Drums/Buu.png");

    IplImage *rdrumblue = cvCreateImage(cvSize(110,95), drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen = cvCreateImage(cvSize(110,95), drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred = cvCreateImage(cvSize(110,95), drumred->depth, drumred->nChannels);
    IplImage *rdrumblue2 = cvCreateImage(cvSize(110,95), drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen2 = cvCreateImage(cvSize(110,95), drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred2 = cvCreateImage(cvSize(110,95), drumred->depth, drumred->nChannels);
    IplImage *rlineblue = cvCreateImage(cvSize(230,80), lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen = cvCreateImage(cvSize(230,80), linegreen->depth, linegreen->nChannels);
    IplImage *rlinered = cvCreateImage(cvSize(230,80), linered->depth, linered->nChannels);
    IplImage *rlineblue2 = cvCreateImage(cvSize(230,80), lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen2 = cvCreateImage(cvSize(230,80), linegreen->depth, linegreen->nChannels);
    IplImage *rlinered2 = cvCreateImage(cvSize(230,80), linered->depth, linered->nChannels);
    IplImage *rstep1 = cvCreateImage(cvSize(100,100), step1->depth, step1->nChannels);
    IplImage *rstep2 = cvCreateImage(cvSize(100,100), step2->depth, step2->nChannels);
    IplImage *rarrow1 = cvCreateImage(cvSize(110,70), arrow1->depth, arrow1->nChannels);
    IplImage *rarrow2 = cvCreateImage(cvSize(110,70), arrow2->depth, arrow2->nChannels);
    IplImage *rbien = cvCreateImage(cvSize(60,25), bien->depth, bien->nChannels);
    IplImage *rbuu = cvCreateImage(cvSize(60,25), buu->depth, buu->nChannels);

    cvResize(drumblue, rdrumblue);
    cvResize(drumgreen, rdrumgreen);
    cvResize(drumred, rdrumred);
    cvResize(drumblue, rdrumblue2);
    cvResize(drumgreen, rdrumgreen2);
    cvResize(drumred, rdrumred2);
    cvResize(lineblue, rlineblue);
    cvResize(linegreen, rlinegreen);
    cvResize(linered, rlinered);
    cvResize(lineblue, rlineblue2);
    cvResize(linegreen, rlinegreen2);
    cvResize(linered, rlinered2);
    cvResize(step1, rstep1);
    cvResize(step2, rstep2);
    cvResize(arrow1, rarrow1);
    cvResize(arrow2, rarrow2);
    cvResize(bien, rbien);
    cvResize(buu, rbuu);

    /* mirrored copies for the right-hand player */
    cvFlip(rdrumblue2, rdrumblue2, 1);
    cvFlip(rdrumgreen2, rdrumgreen2, 1);
    cvFlip(rdrumred2, rdrumred2, 1);
    cvFlip(rlineblue2, rlineblue2, 1);
    cvFlip(rlinegreen2, rlinegreen2, 1);
    cvFlip(rlinered2, rlinered2, 1);

    /* release memory */
    cvReleaseImage( &drumblue );
    cvReleaseImage( &drumgreen );
    cvReleaseImage( &drumred );
    cvReleaseImage( &lineblue );
    cvReleaseImage( &linegreen );
    cvReleaseImage( &linered );
    cvReleaseImage( &step1 );
    cvReleaseImage( &step2 );
    cvReleaseImage( &arrow1 );
    cvReleaseImage( &arrow2 );
    cvReleaseImage( &bien );
    cvReleaseImage( &buu );

    /* create a window */
    cvNamedWindow( "video", 1 );

    /* set time and frame variables */
    initGame = clock();
    frameN = 0;
    /* set scores */
    score1 = 0;
    score2 = 0;
    redb = false; greenb = false; blueb = false;
    redb2 = false; greenb2 = false; blueb2 = false;
    bienn = 0;
    maln = 0;

    std::list<int> lista;
    lista.push_front(1);
    lista.push_front(2);
    lista.push_front(3);
    lista.push_front(4);
    lista.push_front(5);

    while( key != 'q' )
    {
        /* get a frame (frame size: 640x480) */
        frame = cvQueryFrame( capture );
        /* always check */
        if( !frame )
            break;

        /* clone and 'fix' frame */
        cvFlip( frame, frame, 1 );
        GenerateScoreMessage(frame, score1, score2);

        /* detect hands and draw boxes */
        detectHands( frame, rlineblue2, rlinegreen2, rlinered2, false );
        detectHands( frame, rlineblue, rlinegreen, rlinered, true );

        /* overlay the game play buttons */
        cvLine(frame, cvPoint(320,0), cvPoint(320,480), cvScalar(255,255,0), 2);
        OverlayImage(frame, rdrumblue, cvPoint(0,240), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rdrumgreen, cvPoint(0,315), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rdrumred, cvPoint(0,390), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rdrumblue2, cvPoint(530, 15), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rdrumgreen2, cvPoint(530,90), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rdrumred2, cvPoint(530,165), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow1, cvPoint(0, 23), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow1, cvPoint(0,98), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow1, cvPoint(0,173), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow2, cvPoint(530,248), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow2, cvPoint(530,323), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        OverlayImage(frame, rarrow2, cvPoint(530,398), cvScalar(1,1,1,1), cvScalar(1,1,1,1));
        drawAndAdvance(frame, rbien, rbuu, rstep1, rstep2 );
        // OverlayImage(frame,rstep1,cvPoint(200,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        // OverlayImage(frame,rstep2,cvPoint(400,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        // OverlayImage(frame,rbien,cvPoint(200,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        // OverlayImage(frame,rbuu,cvPoint(400,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        /* display video */
        cvShowImage( "video", frame );

        /* quit if the user presses 'q' */
        key = cvWaitKey( 10 );
        frameN++;
    }

    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );
    return 0;
}
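OverlayImage() is used throughout the loop above but not listed. The widely circulated helper this call pattern matches blends each overlay pixel into the destination using the two CvScalar weights, roughly as follows (a sketch, not necessarily the author's exact code):

/* Hedged sketch: paste 'overlay' onto 'src' at 'location', weighting source by S and overlay by D. */
void OverlayImage(IplImage* src, IplImage* overlay, CvPoint location, CvScalar S, CvScalar D)
{
    for (int x = 0; x < overlay->width; x++)
    {
        if (x + location.x >= src->width) continue;
        for (int y = 0; y < overlay->height; y++)
        {
            if (y + location.y >= src->height) continue;
            // weighted per-channel blend: S scales the source pixel, D scales the overlay pixel
            CvScalar source = cvGet2D(src, y + location.y, x + location.x);
            CvScalar over = cvGet2D(overlay, y, x);
            CvScalar merged;
            for (int i = 0; i < 4; i++)
                merged.val[i] = S.val[i]*source.val[i] + D.val[i]*over.val[i];
            cvSet2D(src, y + location.y, x + location.x, merged);
        }
    }
}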
int main(int argc, char* argv[])
{
    int height, width, step, channels;  // parameters of the image we are working on
    int i, j, k, t1min=0, t1max=0, t2min=0, t2max=0, t3min=0, t3max=0;  // other variables used

    CvMat* threshold_matrix = cvCreateMat(2, 3, CV_32FC1);
    // Load the previous values of the thresholds if they exist
    // (the storage handle is only used as an existence check and is never released)
    CvFileStorage* temp = cvOpenFileStorage("threshold_matrix.xml", NULL, CV_STORAGE_READ);
    if(temp)
    {
        threshold_matrix = (CvMat*)cvLoad("threshold_matrix.xml");
        t1min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 0);
        t2min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 1);
        t3min = (int)CV_MAT_ELEM(*threshold_matrix, float, 0, 2);
        t1max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 0);
        t2max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 1);
        t3max = (int)CV_MAT_ELEM(*threshold_matrix, float, 1, 2);
    }

    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM( 1 );
    if( !capture )
    {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        getchar();
        return -1;
    }

    // grab an image from the capture
    IplImage* frame = cvQueryFrame( capture );

    // Create the windows in which the captured images will be presented
    cvNamedWindow( "Camera", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "F1", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "F2", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "F3", CV_WINDOW_AUTOSIZE );
    //cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );

    /// Create the trackbars
    // (note: 8-bit H tops out at 180 in OpenCV, S and V at 255; 260 just leaves slack)
    char TrackbarName1[50]="t1min";
    char TrackbarName2[50]="t1max";
    char TrackbarName3[50]="t2min";
    char TrackbarName4[50]="t2max";
    char TrackbarName5[50]="t3min";
    char TrackbarName6[50]="t3max";
    cvCreateTrackbar( TrackbarName1, "F1", &t1min, 260, NULL );
    cvCreateTrackbar( TrackbarName2, "F1", &t1max, 260, NULL );
    cvCreateTrackbar( TrackbarName3, "F2", &t2min, 260, NULL );
    cvCreateTrackbar( TrackbarName4, "F2", &t2max, 260, NULL );
    cvCreateTrackbar( TrackbarName5, "F3", &t3min, 260, NULL );
    cvCreateTrackbar( TrackbarName6, "F3", &t3max, 260, NULL );

    // Load the thresholds from the slider bars into these two parameters
    CvScalar hsv_min = cvScalar(t1min, t2min, t3min, 0);
    CvScalar hsv_max = cvScalar(t1max, t2max, t3max, 0);

    // get the image data
    height = frame->height;
    width = frame->width;
    step = frame->widthStep;

    // capture size
    CvSize size = cvSize(width, height);

    // Initialize the different images that are going to be used in the program
    IplImage* hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);    // image converted to the HSV plane
    IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);  // final thresholded image
    IplImage* thresholded1 = cvCreateImage(size, IPL_DEPTH_8U, 1); // per-component threshold images
    IplImage* thresholded2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* thresholded3 = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* filtered = cvCreateImage(size, IPL_DEPTH_8U, 1);     // smoothed image

    while( 1 )
    {
        // Load the thresholds from the slider bars into these two parameters
        hsv_min = cvScalar(t1min, t2min, t3min, 0);
        hsv_max = cvScalar(t1max, t2max, t3max, 0);

        // Get one frame
        frame = cvQueryFrame( capture );
        if( !frame )
        {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            break;
        }

        // Convert the color space to HSV, as it is much easier to filter colors in the HSV color space.
        cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);

        // The lines below are for visualisation during calibration only; remove or comment them out afterwards
        //--------------FROM HERE-----------------------------------
        // Split the image into its 3 one-dimensional images
        cvSplit( hsv_frame, thresholded1, thresholded2, thresholded3, NULL );
        // Filter out colors which are out of range.
        cvInRangeS(thresholded1, cvScalar(t1min,0,0,0), cvScalar(t1max,0,0,0), thresholded1);
        cvInRangeS(thresholded2, cvScalar(t2min,0,0,0), cvScalar(t2max,0,0,0), thresholded2);
        cvInRangeS(thresholded3, cvScalar(t3min,0,0,0), cvScalar(t3max,0,0,0), thresholded3);
        //-------------REMOVE OR COMMENT AFTER CALIBRATION TILL HERE ------------------

        // Memory for the Hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // the Hough detector works better with some smoothing of the image
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        // Hough transform to detect circles
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                        thresholded->height/4, 100, 50, 10, 400);
        for (int i = 0; i < circles->total; i++)
        {
            // get the parameters of the detected circle
            float* p = (float*)cvGetSeqElem( circles, i );
            printf("Ball! x=%f y=%f r=%f\n\r", p[0], p[1], p[2] );
            // draw a circle with the centre and the radius obtained from the Hough transform
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),  // plot centre
                      2, CV_RGB(255,255,255), -1, 8, 0 );
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),  // plot circle
                      cvRound(p[2]), CV_RGB(0,255,0), 2, 8, 0 );
        }

        /* for testing purposes you can show all the images, but when done
           with calibration only show "Camera" to keep the screen clean */
        cvShowImage( "Camera", frame );         // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame );        // Original stream in the HSV color space
        cvShowImage( "After Color Filtering", thresholded );  // The stream after color filtering
        cvShowImage( "F1", thresholded1 );      // individual filters
        cvShowImage( "F2", thresholded2 );
        cvShowImage( "F3", thresholded3 );
        //cvShowImage( "filtered", thresholded );

        cvReleaseMemStorage(&storage);

        // If the ESC key is pressed: key=0x10001B under OpenCV 0.9.7 (linux version),
        // so remove the higher bits using the AND operator
        if( (cvWaitKey(10) & 255) == 27 )
            break;
    }

    // Save the threshold values before exiting
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 0, 0 )) = t1min;
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 0, 1 )) = t2min;
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 0, 2 )) = t3min;
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 1, 0 )) = t1max;
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 1, 1 )) = t2max;
    *((float*)CV_MAT_ELEM_PTR( *threshold_matrix, 1, 2 )) = t3max;
    cvSave("threshold_matrix.xml", threshold_matrix);

    // Release the capture device and the windows
    cvReleaseCapture( &capture );
    cvDestroyAllWindows();  // the original destroyed "mywindow", a window that was never created
    return 0;
}
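For reference, here is the cvHoughCircles() call from the loop above annotated parameter by parameter (same values; the comments are interpretation, not new tuning):

CvSeq* circles = cvHoughCircles(
    thresholded,            // single-channel 8-bit input image
    storage,                // memory storage that receives the result sequence
    CV_HOUGH_GRADIENT,      // detection method (the only one implemented in this API)
    2,                      // dp: the accumulator has half the input resolution
    thresholded->height/4,  // minimum distance between detected centres
    100,                    // param1: upper threshold for the internal Canny pass
    50,                     // param2: accumulator threshold (lower = more circles)
    10, 400 );              // minimum and maximum circle radius, in pixels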
int main(int argc, char** argv)
{
    IplImage* temp1 = NULL;
    IplImage* temp2 = NULL;
    IplImage* result = NULL;
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;
    CvBGStatModel* bg_model = 0;
    CvBGStatModel* bg_model1 = 0;
    IplImage* rawImage = 0;
    IplImage* yuvImage = 0;
    IplImage* rawImage1 = 0;
    IplImage* pFrImg = 0;
    IplImage* pFrImg1 = 0;
    IplImage* pFrImg2 = 0;
    IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;
    int c, n;

    maxMod[0] = 25; minMod[0] = 35;
    maxMod[1] = 8;  minMod[1] = 8;
    maxMod[2] = 8;  minMod[2] = 8;

    // hard-coded override for testing: always read the test clip
    argc = 2;
    argv[1] = "intelligentroom_raw.avi";

    if( argc > 2 )
    {
        fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
        return -1;
    }
    if (argc == 1)
        if( !(capture = cvCaptureFromCAM(-1)))
        {
            fprintf(stderr, "Can not open camera.\n");
            return -2;
        }
    if (argc == 2)
        if( !(capture = cvCaptureFromFile(argv[1])))
        {
            fprintf(stderr, "Can not open video file %s\n", argv[1]);
            return -2;
        }

    bool pause = false;
    bool singlestep = false;

    if( capture )
    {
        cvNamedWindow( "Original video sequence", 1 );
        cvNamedWindow( "Codebook without real-time update [this paper]", 1 );
        cvNamedWindow( "Codebook with real-time update [this paper]", 1 );
        cvNamedWindow( "MOG-based method [Chris Stauffer'2001]", 1 );
        cvNamedWindow( "Three-frame differencing", 1 );
        cvNamedWindow( "Bayes decision method [Liyuan Li'2003]", 1 );
        cvMoveWindow( "Original video sequence", 0, 0 );
        cvMoveWindow( "Codebook without real-time update [this paper]", 360, 0 );
        cvMoveWindow( "Codebook with real-time update [this paper]", 720, 350 );
        cvMoveWindow( "MOG-based method [Chris Stauffer'2001]", 0, 350 );
        cvMoveWindow( "Three-frame differencing", 720, 0 );
        cvMoveWindow( "Bayes decision method [Liyuan Li'2003]", 360, 350 );

        int nFrmNum = -1;
        for(;;)
        {
            if(!pause)
            {
                rawImage = cvQueryFrame( capture );
                ++nFrmNum;
                printf("Frame %d\n", nFrmNum);
                if(!rawImage)
                    break;
            }
            if(singlestep)
                pause = true;

            if(0 == nFrmNum)
            {
                printf(". . . wait for it . . .\n");
                temp1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                temp2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                bg_model = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);
                rawImage1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
                yuvImage = cvCloneImage(rawImage);
                pFrImg  = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg2 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                imageLen = rawImage->width*rawImage->height;
                cA = new codeBook [imageLen];
                cC = new codeBook [imageLen];
                cD = new codeBook [imageLen];
                for(int f = 0; f < imageLen; f++)
                {
                    cA[f].numEntries = 0; cA[f].t = 0;
                    cC[f].numEntries = 0; cC[f].t = 0;
                    cD[f].numEntries = 0; cD[f].t = 0;
                }
                for(int nc = 0; nc < nChannels; nc++)
                    cbBounds[nc] = 10;
                ch[0] = true; ch[1] = true; ch[2] = true;
            }

            if( rawImage )
            {
                if(!pause)
                {
                    cvSmooth(rawImage, rawImage1, CV_GAUSSIAN, 3, 3);
                    cvChangeDetection(temp1, temp2, result1);
                    cvChangeDetection(rawImage1, temp1, result2);
                    cvAnd(result1, result2, result, NULL);
                    cvCopy(temp1, temp2, NULL);
                    cvCopy(rawImage, temp1, NULL);
                    cvUpdateBGStatModel( rawImage, bg_model );
                    cvUpdateBGStatModel( rawImage, bg_model1 );
                }
                cvCvtColor( rawImage1, yuvImage, CV_BGR2YCrCb );

                if( !pause && nFrmNum >= 1 && nFrmNum < T )
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    for(int c = 0; c < imageLen; c++)
                    {
                        update_codebook_model(pColor, cA[c], cbBounds, nChannels);
                        trainig_codebook(pColor, cC[c], cbBounds, nChannels);
                        pColor += 3;
                    }
                }
                if( nFrmNum == T )
                {
                    for(c = 0; c < imageLen; c++)
                    {
                        clear_stale_entries(cA[c]);
                        training_clear_stale_entries(cC[c]);
                    }
                }
                if(nFrmNum > T)
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    uchar maskPixelCodeBook;
                    uchar maskPixelCodeBook1;
                    uchar maskPixelCodeBook2;
                    uchar *pMask  = (uchar *)((pFrImg)->imageData);
                    uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
                    uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
                    for(int c = 0; c < imageLen; c++)
                    {
                        // output of the codebook algorithm without automatic background update [this paper]
                        maskPixelCodeBook1 = background_Diff(pColor, cA[c], nChannels, minMod, maxMod);
                        *pMask1++ = maskPixelCodeBook1;
                        // output of the codebook algorithm with automatic background update [this paper]
                        if ( !pause && det_update_codebook_cC(pColor, cC[c], cbBounds, nChannels))
                        {
                            det_update_codebook_cD(pColor, cD[c], cbBounds, nChannels, nFrmNum);
                            realtime_clear_stale_entries_cD(cD[c], nFrmNum);
                            cD_to_cC(cD[c], cC[c], (nFrmNum - T)/5);
                        }
                        else
                        {
                            realtime_clear_stale_entries_cC(cC[c], nFrmNum);
                        }
                        maskPixelCodeBook2 = background_Diff(pColor, cC[c], nChannels, minMod, maxMod);
                        *pMask2++ = maskPixelCodeBook2;
                        pColor += 3;
                    }
                    cvCopy(pFrImg2, ImaskCodeBookCC);
                    if(!pause)
                    {
                        count_Segmentation(cC, yuvImage, nChannels, minMod, maxMod);
                        forgratio = (float)(fgcount) / imageLen;
                    }
                }

                bg_model1->foreground->origin = 1;
                bg_model->foreground->origin = 1;
                pFrImg->origin = 1;
                pFrImg1->origin = 1;
                pFrImg2->origin = 1;
                ImaskCodeBookCC->origin = 1;
                result->origin = 1;

                //connected_Components(pFrImg1,1,40);
                //connected_Components(pFrImg2,1,40);
                cvShowImage( "MOG-based method [Chris Stauffer'2001]", bg_model->foreground );
                cvShowImage( "Original video sequence", rawImage );
                cvShowImage( "Three-frame differencing", result );
                cvShowImage( "Codebook without real-time update [this paper]", pFrImg1 );
                cvShowImage( "Codebook with real-time update [this paper]", pFrImg2 );
                cvShowImage( "Bayes decision method [Liyuan Li'2003]", bg_model1->foreground );

                c = cvWaitKey(1) & 0xFF;
                // End processing on ESC, q or Q
                if(c == 27 || c == 'q' || c == 'Q')
                    break;
                // Else check for user input
                switch(c)
                {
                case 'h':
                    help();
                    break;
                case 'p':
                    pause ^= 1;
                    break;
                case 's':
                    singlestep = 1;
                    pause = false;
                    break;
                case 'r':
                    pause = false;
                    singlestep = false;
                    break;
                // CODEBOOK PARAMS
                case 'y':
                case '0':
                    ch[0] = 1; ch[1] = 0; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'u':
                case '1':
                    ch[0] = 0; ch[1] = 1; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'v':
                case '2':
                    ch[0] = 0; ch[1] = 0; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'a': // All
                case '3':
                    ch[0] = 1; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'b': // both u and v together
                    ch[0] = 0; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'z':
                    printf(" Fadd +1 ");
                    Fadd += 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'x':
                    printf(" Fadd -1 ");
                    Fadd -= 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'n':
                    printf(" Tavgstale +1 ");
                    Tavgstale += 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'm':
                    printf(" Tavgstale -1 ");
                    Tavgstale -= 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'i': // modify max classification bounds (max bound goes higher)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n]) maxMod[n] += 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'o': // modify max classification bounds (max bound goes lower)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n]) maxMod[n] -= 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'k': // modify min classification bounds (min bound goes lower)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n]) minMod[n] += 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                case 'l': // modify min classification bounds (min bound goes higher)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n]) minMod[n] -= 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                }
            }
        }

        cvReleaseCapture( &capture );
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);
        cvDestroyWindow( "Original video sequence" );
        cvDestroyWindow( "Codebook without real-time update [this paper]" );
        cvDestroyWindow( "Codebook with real-time update [this paper]" );
        cvDestroyWindow( "MOG-based method [Chris Stauffer'2001]" );
        cvDestroyWindow( "Three-frame differencing" );
        cvDestroyWindow( "Bayes decision method [Liyuan Li'2003]" );
        cvReleaseImage(&temp1);
        cvReleaseImage(&temp2);
        cvReleaseImage(&result);
        cvReleaseImage(&result1);
        cvReleaseImage(&result2);
        cvReleaseImage(&pFrImg);
        cvReleaseImage(&pFrImg1);
        cvReleaseImage(&pFrImg2);
        if(yuvImage) cvReleaseImage(&yuvImage);
        // NOTE: rawImage comes from cvQueryFrame() and is owned by the capture;
        // releasing it, as the original did here, risks a double free.
        //if(rawImage) cvReleaseImage(&rawImage);
        if(rawImage1) cvReleaseImage(&rawImage1);
        if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
        delete [] cA;
        delete [] cC;
        delete [] cD;
    }
    else
    {
        printf("\n\nDarn, something wrong with the parameters\n\n");
        help();
    }
    return 0;
}
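The codeBook type that cA/cC/cD are built from is not defined in this listing. It follows the structure popularized by the Learning OpenCV codebook example, roughly as below (field names are assumptions based on that reference):

/* Hedged sketch of the per-pixel codebook structures assumed by the code above. */
typedef struct ce {
    uchar learnHigh[3];   // high learning boundary per YUV channel
    uchar learnLow[3];    // low learning boundary per YUV channel
    uchar max[3];         // highest pixel value admitted by this entry
    uchar min[3];         // lowest pixel value admitted by this entry
    int   t_last_update;  // frame number of the last update
    int   stale;          // longest period of inactivity seen so far
} code_element;

typedef struct code_book {
    code_element **cb;    // dynamically grown array of entries
    int numEntries;       // entries currently in use
    int t;                // frames processed by this codebook
} codeBook;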
int main(int argc, char **argv)
{
    if(argc != 2)
    {
        printf("usage: %s <mode>\n0 - integrated webcam\n1 - external webcam\n", argv[0]);
        exit(-1);
    }
    else
    {
        int web = atoi(argv[1]);
        if(web >= 0 && web <= 1)
        {
            CvCapture *cam = cvCaptureFromCAM(web);
            cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_WIDTH, 640);
            cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_HEIGHT, 480);
            IplImage *img = cvQueryFrame(cam);
            IplImage *copia = cvCreateImage(cvGetSize(img), 8, 3);
            IplImage *prima = NULL;
            IplImage *binary = cvCreateImage(cvGetSize(img), 8, 1);
            IplImage *ris = cvCreateImage(cvGetSize(img), 8, 3);
            cvNamedWindow(NOME, 1);

            // Variables for the current time and date
            time_t tempo;
            struct tm *timeobj;
            time(&tempo);
            timeobj = localtime(&tempo);
            char nome[25];
            long int num = 0;
            // Put the time data into 'nome'
            strftime(nome, 24, "%H-%M-%S_%F.avi", timeobj);
            // Create the writer that saves the captured frames as a DivX-compressed video
            CvVideoWriter *video = cvCreateVideoWriter(nome, CV_FOURCC('D','I','V','X'), 15, cvSize(640,480), 1);
            // Initialize the fonts
            CvFont scritta, info;
            cvInitFont(&scritta, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5, CV_AA);
            cvInitFont(&info, CV_FONT_HERSHEY_SIMPLEX, .6, .6, 0, 1, 6);
            char tasto;
            int i, j, trovato = 0, scelta, step = binary->widthStep/sizeof(uchar);
            uchar *target = (uchar*)binary->imageData;

            // Choose between dynamic and static mode
            do
            {
                printf("-- Mode selection --\n1) Dynamic -- react to changes between consecutive frames\n2) Static -- react to changes between a chosen reference frame and the current frame\nChoice: ");
                scanf("%1d", &scelta);
            } while(scelta < 1 || scelta > 2);

            while(img)
            {
                // Mirror the image
                cvFlip(img, img, 1);
                // Fetch the time information
                time(&tempo);
                timeobj = localtime(&tempo);
                strftime(nome, 24, "%H:%M:%S %F", timeobj);
                // Draw the info on screen
                cvPutText(img, nome, cvPoint(415,475), &info, CV_RGB(0,255,255));
                // Copy the frame
                cvCopy(img, copia);
                riduciNoise(img, img);

                // Dynamic mode
                if(scelta == 1)
                {
                    // If this is the first frame captured
                    if(prima == NULL)
                    {
                        prima = cvCreateImage(cvGetSize(img), 8, 3);
                        // Copy img into prima
                        cvCopy(img, prima);
                    }
                    else
                    {
                        // Otherwise check for differences against the previous frame
                        cvAbsDiff(img, prima, ris);
                        // Color to grayscale
                        cvCvtColor(ris, binary, CV_BGR2GRAY);
                        // Threshold the image
                        cvThreshold(binary, binary, 62, 255, CV_THRESH_BINARY);
                        riduciNoise(binary, binary);
                        cvCopy(img, prima);
                    }
                }
                // Static mode
                else
                {
                    // If the reference frame has been taken
                    if(prima != NULL)
                    {
                        cvAbsDiff(img, prima, ris);
                        cvCvtColor(ris, binary, CV_BGR2GRAY);
                        cvThreshold(binary, binary, 62, 255, CV_THRESH_BINARY);
                        riduciNoise(binary, binary);
                    }
                }

                // Scan the image pixel by pixel
                for(i = 0; i < binary->height; i++)
                {
                    for(j = 0; j < binary->width; j++)
                    {
                        if(target[i*step+j] == 255)
                            trovato = 1;
                    }
                }

                // If a change was found
                if(trovato)
                {
                    num++;
                    // Stamp "REC o" onto the image
                    cvPutText(copia, "REC", cvPoint(10,25), &scritta, CV_RGB(255,0,0));
                    cvCircle(copia, cvPoint(100,15), 5, CV_RGB(255,0,0), 20, 8);
                    // Save the flagged frame
                    cvWriteFrame(video, copia);
                    trovato = 0;
                }

                // Show the image
                cvShowImage(NOME, copia);
                tasto = cvWaitKey(15);
                if(tasto == 'q')
                    break;
                // Pressing 'v' stores the reference frame for static mode
                else if(tasto == 'v' && scelta == 2)
                {
                    if(prima) cvReleaseImage(&prima);  // don't leak the previous reference frame
                    prima = cvCreateImage(cvGetSize(img), 8, 3);
                    cvCopy(img, prima);
                }
                img = cvQueryFrame(cam);
            }

            // If any frames were recorded
            if(num != 0)
            {
                // Finalize the video
                cvReleaseVideoWriter(&video);
                printf("Video %s saved\n", nome);
            }
        }
        else
            puts("webcam not found");
    }
    return 0;
}
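riduciNoise() ("reduce noise") is called on both the colour frame and the binary mask above but is not shown. A plausible sketch based on the usual morphological-opening cleanup (the 3x3 kernel and single iteration are assumptions):

/* Hedged sketch: suppress isolated speckles with an erode/dilate pass. */
void riduciNoise(IplImage *src, IplImage *dst)
{
    IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
    cvErode(src, dst, kernel, 1);
    cvDilate(dst, dst, kernel, 1);
    cvReleaseStructuringElement(&kernel);
}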
int main (int argc, char **argv)
{
    CvCapture *capture = 0;
    IplImage *frame, *frame_copy = 0;

    cascade = (CvHaarClassifierCascade *) cvLoad ("yolo.xml", 0, 0, 0);
    if (!cascade)
    {
        printf ("ERROR: Could not load classifier cascade\n");
        return -1;
    }
    storage = cvCreateMemStorage (0);
    capture = cvCaptureFromCAM (0);
    if (capture)
    {
        int j = 0;
        for (;;)
        {
            FILE *fin;
            int i = 0;
            flag = 0, f = 0;
            if (!cvGrabFrame (capture))
                break;
            frame = cvRetrieveFrame (capture);
            if (!frame)
                break;
            if (!frame_copy)
                frame_copy = cvCreateImage (cvSize (frame->width, frame->height),
                                            IPL_DEPTH_8U, frame->nChannels);

            // Ask the shell whether a totem (media player) process is running
            system ("ps -e | grep totem > sample.txt");
            fin = fopen ("sample.txt", "r");
            fflush (fin);  // note: fflush() on an input stream is undefined behaviour
            while (!feof (fin))
            {
                char a[40];
                fscanf (fin, "%s\n", a);
                // crude check: does the token start with "totem"? (i is always 0 here)
                if (a[i] == 't' && a[i + 1] == 'o' && a[i + 2] == 't' && a[i + 3] == 'e' && a[i + 4] == 'm')
                {
                    f = 1;
                    break;
                }
                else
                    f = 0;
            }
            fclose (fin);

            if (frame->origin == IPL_ORIGIN_TL)
                cvCopy (frame, frame_copy, 0);
            else
                cvFlip (frame, frame_copy, 0);

            flag = detect_and_draw (frame_copy);
            if (f == 0)
            {
                printf ("no totem playing\n please switch off the application from the command centre\n or open a video file\n");
                sleep (5);
            }
            else if (flag == 0 && f == 1 && played == 1)
            {
                system ("totem --pause");
                played = 0;
            }
            else if (flag == 1 && f == 1 && played == 0)
            {
                system ("totem --play");
                played = 1;
            }
            if (cvWaitKey (10) >= 0)
                break;
        }
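The listing above is cut off right after the capture loop; the remainder presumably closes the capture branch and releases resources along these lines (a hedged sketch, not the original tail):

        // hypothetical tail: free the frame copy and the capture, then the storage
        cvReleaseImage (&frame_copy);
        cvReleaseCapture (&capture);
    }
    cvReleaseMemStorage (&storage);
    return 0;
}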
int main(int argc, char **argv)
{
    CvCapture *capture = 0;
    int resize_factor = 100;

    if(argc > 1)
    {
        std::cout << "Opening: " << argv[1] << std::endl;
        capture = cvCaptureFromAVI(argv[1]);
    }
    else
    {
        capture = cvCaptureFromCAM(0);
        resize_factor = 50; // set size = 50% of original image
    }

    if(!capture)
    {
        std::cerr << "Cannot initialize video!" << std::endl;
        return 1;
    }

    IplImage *frame_aux = cvQueryFrame(capture);
    IplImage *frame = cvCreateImage(
        cvSize((int)((frame_aux->width * resize_factor) / 100),
               (int)((frame_aux->height * resize_factor) / 100)),
        frame_aux->depth, frame_aux->nChannels);
    cvResize(frame_aux, frame);

    /* Background Subtraction Methods */
    IBGS *bgs;

    /*** Default Package ***/
    bgs = new FrameDifferenceBGS;
    //bgs = new StaticFrameDifferenceBGS;
    //bgs = new WeightedMovingMeanBGS;
    //bgs = new WeightedMovingVarianceBGS;
    //bgs = new MixtureOfGaussianV1BGS;
    //bgs = new MixtureOfGaussianV2BGS;
    //bgs = new AdaptiveBackgroundLearning;
    //bgs = new GMG;

    /*** DP Package (adapted from Donovan Parks) ***/
    //bgs = new DPAdaptiveMedianBGS;
    //bgs = new DPGrimsonGMMBGS;
    //bgs = new DPZivkovicAGMMBGS;
    //bgs = new DPMeanBGS;
    //bgs = new DPWrenGABGS;
    //bgs = new DPPratiMediodBGS;
    //bgs = new DPEigenbackgroundBGS;
    //bgs = new DPTextureBGS;

    /*** TB Package (adapted from Thierry Bouwmans) ***/
    //bgs = new T2FGMM_UM;
    //bgs = new T2FGMM_UV;
    //bgs = new T2FMRF_UM;
    //bgs = new T2FMRF_UV;
    //bgs = new FuzzySugenoIntegral;
    //bgs = new FuzzyChoquetIntegral;

    /*** JMO Package (adapted from Jean-Marc Odobez) ***/
    //bgs = new MultiLayerBGS;

    /*** PT Package (adapted from Hofmann) ***/
    //bgs = new PixelBasedAdaptiveSegmenter;

    /*** LB Package (adapted from Laurence Bender) ***/
    //bgs = new LBSimpleGaussian;
    //bgs = new LBFuzzyGaussian;
    //bgs = new LBMixtureOfGaussians;
    //bgs = new LBAdaptiveSOM;
    //bgs = new LBFuzzyAdaptiveSOM;

    /*** LBP-MRF Package (adapted from Csaba Kertész) ***/
    //bgs = new LbpMrf;

    /*** AV Package (adapted from Antoine Vacavant) ***/
    //bgs = new VuMeter;

    /*** EG Package (adapted from Ahmed Elgammal) ***/
    //bgs = new KDE;

    int key = 0;
    while(key != 'q')
    {
        frame_aux = cvQueryFrame(capture);
        if(!frame_aux)
            break;
        cvResize(frame_aux, frame);

        cv::Mat img_input(frame);
        cv::imshow("input", img_input);

        cv::Mat img_mask;
        cv::Mat img_bkgmodel;
        // process() automatically shows the foreground mask image
        bgs->process(img_input, img_mask, img_bkgmodel);

        //if(!img_mask.empty())
        //    do something

        key = cvWaitKey(33);
    }

    delete bgs;
    cvDestroyAllWindows();
    cvReleaseCapture(&capture);
    return 0;
}
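IBGS is the common interface from the bgslibrary package that every algorithm listed above implements; its documented shape is roughly as follows (a sketch):

#include <opencv2/core/core.hpp>

/* Hedged sketch of the bgslibrary IBGS interface assumed by the demo above. */
class IBGS
{
public:
    // img_output receives the foreground mask, img_bgmodel the current background model
    virtual void process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel) = 0;
    virtual ~IBGS() {}
};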
void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, image *labels, int classes, int frame_skip)
{
    //skip = frame_skip;
    int delay = frame_skip;
    demo_names = names;
    demo_labels = labels;
    demo_classes = classes;
    demo_thresh = thresh;
    printf("Demo\n");
    net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    set_batch_network(&net, 1);
    srand(2222222);

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }
    if(!cap) error("Couldn't connect to webcam.\n");

    detection_layer l = net.layers[net.n-1];
    int j;

    avg = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) images[j] = make_image(1,1,3);

    boxes = (BOX *)calloc(l.side*l.side*l.n, sizeof(BOX));
    probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *));
    // the original allocated sizeof(float *) per element here, which merely over-allocates
    for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float));

    pthread_t fetch_thread;
    pthread_t detect_thread;

    // prime the double-buffered fetch/detect pipeline before entering the loop
    fetch_in_thread(0);
    det = in;
    det_s = in_s;

    fetch_in_thread(0);
    detect_in_thread(0);
    disp = det;
    det = in;
    det_s = in_s;

    for(j = 0; j < FRAMES/2; ++j){
        fetch_in_thread(0);
        detect_in_thread(0);
        disp = det;
        det = in;
        det_s = in_s;
    }

    int count = 0;
    cvNamedWindow("Demo", CV_WINDOW_NORMAL);
    cvMoveWindow("Demo", 0, 0);
    cvResizeWindow("Demo", 1352, 1013);

    double before = get_wall_time();

    while(1){
        ++count;
        if(1){
            if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
            if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");

            show_image(disp, "Demo");
            int c = cvWaitKey(1);
            if (c == 10){
                // Enter cycles the frame-skip setting: 0 -> 60 -> 4 -> 0
                if(frame_skip == 0) frame_skip = 60;
                else if(frame_skip == 4) frame_skip = 0;
                else if(frame_skip == 60) frame_skip = 4;
                else frame_skip = 0;
            }

            pthread_join(fetch_thread, 0);
            pthread_join(detect_thread, 0);

            if(delay == 0){
                free_image(disp);
                disp = det;
            }
            det = in;
            det_s = in_s;
        }else {
            fetch_in_thread(0);
            det = in;
            det_s = in_s;
            detect_in_thread(0);
            if(delay == 0) {
                free_image(disp);
                disp = det;
            }
            show_image(disp, "Demo");
            cvWaitKey(1);
        }
        --delay;
        if(delay < 0){
            delay = frame_skip;
            double after = get_wall_time();
            float curr = 1./(after - before);
            fps = curr;
            before = after;
        }
    }
}
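fetch_in_thread() and detect_in_thread() are not shown here; in the darknet demo they are small wrappers along these lines (a sketch; helper names vary between darknet versions):

/* Hedged sketch of the frame-grabbing worker used by the pipeline above. */
void *fetch_in_thread(void *ptr)
{
    in = get_image_from_stream(cap);        // pull the next frame from the capture
    in_s = resize_image(in, net.w, net.h);  // scale it to the network input size
    return 0;
}
/* detect_in_thread() (not sketched in full) runs network_predict() on the prepared
   frame, converts the raw output into boxes/probs, applies NMS and draws the labels. */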
void FaceTrackDemo(const char* savefile)
{
    // images coming from the webcam will be resized to these dimensions (smaller images = faster runtime)
    float vwidth = 240, vheight = 180;

    ClfStrongParams *clfparams;
    SimpleTracker tr;
    SimpleTrackerParams trparams;

    ////////////////////////////////////////////////////////////////////////////////////////////
    // PARAMS

    // strong model (hard-coded choice: 2 = MILTrack)
    switch( 2 ){
    case 1: // OBA1
        clfparams = new ClfAdaBoostParams();
        ((ClfAdaBoostParams*)clfparams)->_numSel = 50;
        ((ClfAdaBoostParams*)clfparams)->_numFeat = 250;
        trparams._posradtrain = 1.0f;
        break;
    case 2: // MILTrack
        clfparams = new ClfMilBoostParams();
        ((ClfMilBoostParams*)clfparams)->_numSel = 50;
        ((ClfMilBoostParams*)clfparams)->_numFeat = 250;
        trparams._posradtrain = 4.0f;
        break;
    }

    // feature parameters
    FtrParams *ftrparams;
    HaarFtrParams haarparams;
    ftrparams = &haarparams;
    clfparams->_ftrParams = ftrparams;
    // online boosting parameters
    clfparams->_ftrParams = ftrparams;
    // tracking parameters
    trparams._init_negnumtrain = 65;
    trparams._init_postrainrad = 3.0f;
    trparams._srchwinsz = 25;
    trparams._negnumtrain = 65;

    // set up video
    CvCapture* capture = cvCaptureFromCAM( -1 );
    if( capture == NULL ){
        abortError(__LINE__,__FILE__,"Camera not found!");
        return;
    }

    ////////////////////////////////////////////////////////////////////////////////////////////
    // TRACK

    // print usage
    fprintf(stderr,"MILTRACK FACE DEMO\n===============================\n");
    fprintf(stderr,"This demo uses the OpenCV face detector to initialize the tracker.\n");
    fprintf(stderr,"Commands:\n");
    fprintf(stderr,"\tPress 'q' to QUIT\n");
    fprintf(stderr,"\tPress 'r' to RE-INITIALIZE\n\n");

    // grab first frame
    Matrixu frame,framer,framedisp;
    Matrixu::CaptureImage(capture,framer);
    frame = framer.imResize(vheight,vwidth);

    // save output
    CvVideoWriter *w=NULL;

    // initialize with face location
    while( !Tracker::initFace(&trparams,frame) ){
        Matrixu::CaptureImage(capture,framer);
        frame = framer.imResize(vheight,vwidth);
        frame.display(1,2);
    }
    ftrparams->_height = (uint)trparams._initstate[2];
    ftrparams->_width = (uint)trparams._initstate[3];

    // initialize the tracker here
    tr.init(frame, trparams, clfparams);

    StopWatch sw(true);
    double ttime=0.0;
    double probtrack=0.0;

    // track
    for (int cnt = 0; Matrixu::CaptureImage(capture,framer); cnt++)
    {
        frame = framer.imResize(vheight,vwidth);
        tr.track_frame(frame, framedisp); // grab tracker confidence

        // initialize video output
        if( savefile != NULL && w==NULL ){
            w = cvCreateVideoWriter( savefile, CV_FOURCC('I','Y','U','V'), 15,
                                     cvSize(framedisp.cols(), framedisp.rows()), 3 );
        }

        // display and save frame
        framedisp._keepIpl=true;
        framedisp.display(1,2);
        if( w != NULL )
            Matrixu::WriteFrame(w, framedisp);
        framedisp._keepIpl=false;
        framedisp.freeIpl();
        char q=cvWaitKey(1);
        ttime = sw.Elapsed(true);
        fprintf(stderr,"%s%d Frames/%f sec = %f FPS, prob=%f",ERASELINE,cnt,ttime,((double)cnt)/ttime,probtrack);

        // quit
        if( q == 'q' )
            break;
        // restart with face detector
        else if( q == 'r' || probtrack<0 )
        {
            while( !Tracker::initFace(&trparams,frame) && q!='q' ){
                Matrixu::CaptureImage(capture,framer);
                frame = framer.imResize(vheight,vwidth);
                frame.display(1,2);
                q=cvWaitKey(1);
            }
            if( q == 'q' )
                break;
            // re-initialize tracker with new parameters
            ftrparams->_height = (uint)trparams._initstate[2];
            ftrparams->_width = (uint)trparams._initstate[3];
            clfparams->_ftrParams = ftrparams;
            fprintf(stderr,"\n");
            tr.init(frame, trparams, clfparams);
        }
    }

    // clean up
    if( w != NULL )
        cvReleaseVideoWriter( &w );
    cvReleaseCapture( &capture );
}
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description : This is the entry point of the program.
// Return value : SUCCESS:0 ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // Initialize
    // If the drone is not connected, fall back to the webcam
    if (!ardrone.open())
    {
        printf("Drone failed to connect.\n");
        isDroneConnected = false;
    }
    else
        isDroneConnected = true;

    //// DEBUGGING
    //isDroneConnected = false;

    quitProgram = false;
    int patternCount = 0;
    visiblePattern = 0;
    //lastVisiblePattern = 0;
    absoluteControl = false;
    vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;
    points = 0, pSaved = 0, cSaved = 0;
    peoplePicked = false, cratePicked = false;
    gameTimeStart = cvGetTickCount();
    gameTimeLeft = 0;
    gameOn = false;

    // Loading patterns
    LoadPattern(filename1, patternLibrary, patternCount);
    LoadPattern(filename2, patternLibrary, patternCount);
    LoadPattern(filename3, patternLibrary, patternCount);
    LoadPattern(filename4, patternLibrary, patternCount);
    LoadPattern(filename5, patternLibrary, patternCount);
    LoadPattern(filename6, patternLibrary, patternCount);
    LoadPattern(filename7, patternLibrary, patternCount);
    LoadPattern(filename8, patternLibrary, patternCount);
    LoadPattern(filename9, patternLibrary, patternCount);
    LoadPattern(filename10, patternLibrary, patternCount);
    LoadPattern(filename11, patternLibrary, patternCount);
    cout << patternCount << " patterns are loaded." << endl;

    // Pattern detector arguments
    int norm_pattern_size = PAT_SIZE;
    double fixed_thresh = 40;
    double adapt_thresh = 5;      // not used in FIXED_THRESHOLD mode
    int adapt_block_size = 45;    // not used in FIXED_THRESHOLD mode
    double confidenceThreshold = 0.35;
    int mode = 2;                 // 1: FIXED_THRESHOLD, 2: ADAPTIVE_THRESHOLD

    // Set to vertical drone camera
    ardrone.setCamera(1);

    // Initialise PatternDetector
    PatternDetector myDetector(fixed_thresh, adapt_thresh, adapt_block_size, confidenceThreshold, norm_pattern_size, mode);

    vector<Point2f> coordinates;
    Point2f point;
    coordinates.push_back(point);
    coordinates.push_back(point);
    coordinates.push_back(point);
    coordinates.push_back(point);
    coordinates.push_back(point);
    for (int i = 0; i <= patternCount; i++)
    {
        // Initialise each pattern's coordinates
        patternsCoordinates.push_back(coordinates);
        //// Initialise each pattern timer
        //int timer;
        //timers.push_back(timer);
    }

    // Start main loop
    while (1)
    {
        // Check whether to terminate the program
        if (quitProgram)
            break;
        // Update
        if (!ardrone.update())
            break;

        // OpenCV image
        IplImage *img;
        // Check which image source to feed to OpenCV
        if (isDroneConnected)
        {
            // Get drone image
            img = ardrone.getImage();
        }
        else
        {
            // Open the webcam once; the original re-opened it on every
            // iteration, which leaks one capture per frame
            if (!webcamCapture)
                webcamCapture = cvCaptureFromCAM(0);
            // Get webcam image
            img = cvQueryFrame(webcamCapture);
        }

        // Create image matrix
        Mat imgMat = Mat(img);

        // Render the HUD
        Mat result;
        if (isDroneConnected)
            result = HUD(imgMat, WIDTH, HEIGHT);
        else
            result = HUD(imgMat, WIDTH, WEBCAM_HEIGHT);

        // Create a buffer of the image
        Mat buffer;
        result.copyTo(buffer);

        // Run the detector
        myDetector.detect(imgMat, cameraMatrix, distortions, patternLibrary, detectedPattern);

        // Augment the input frame (and print out the properties of the patterns if you want)
        if (detectedPattern.size())
        {
            for (unsigned int i = 0; i < detectedPattern.size(); i++)
            {
                // Get pattern id
                int id = detectedPattern[i].id;

                //// Start current pattern seen timer
                //int now = cvGetTickCount();
                //int passedSinceSeen = ((now - timers[detectedPattern[i].id]) / (cvGetTickFrequency() * 1000)) / 1000;
                //// Only set the visible pattern if a pattern was detected for more than 1 second
                //if (passedSinceSeen > SEEN_TIMER) SetVisiblePattern(detectedPattern[i].id);

                SetVisiblePattern(id);

                // Drawing
                detectedPattern.at(i).draw(buffer, cameraMatrix, distortions);

                // Get pattern corner and centre coordinates
                Point2f ul, ur, lr, ll, centre;
                detectedPattern.at(i).getCoordinates(ul, ur, lr, ll, centre, cameraMatrix, distortions);

                // Store coordinates
                patternsCoordinates[id][0] = ul;
                patternsCoordinates[id][1] = ur;
                patternsCoordinates[id][2] = lr;
                patternsCoordinates[id][3] = ll;
                patternsCoordinates[id][4] = centre;

                if (isDroneConnected)
                    CheckGamePatterns(WIDTH, HEIGHT, id);
                else
                    CheckGamePatterns(WIDTH, WEBCAM_HEIGHT, id);
            }
        }
        else
        {
            //for (int i = 0; i < patternCount; i++) {
            //    // reset pattern timers
            //    timers[i] = cvGetTickCount();
            //}

            // Reset visible pattern
            visiblePattern = 0;
        }

        //// Get elapsed time since a pattern was last seen
        //lastVPElapsed = cvGetTickCount();
        //int elapsedTime = ((lastVPElapsed - lastVPStart) / (cvGetTickFrequency() * 1000)) / 1000;
        //// Check if the reset timer limit passed and reset the last visible pattern
        //if (elapsedTime < 300)
        //    if (RESET_TIMER - elapsedTime < 0) lastVisiblePattern = 0;

        // Get key input
        int key = cvWaitKey(33);
        // Key controls
        KeyControls(key);

        // Autonomous drone control
        KeepGoodAltitude();
        AutoAdjustPosition(key);

        // Always go back to hovering if there is no user input, no pattern is
        // visible, and the last visible pattern is stale (0)
        if (absoluteControl && key < 0)
            Hover();
        else if (key < 0 && visiblePattern == 0/* && lastVisiblePattern == 0*/)
            Hover();

        // Drone movement
        ardrone.move3D(vx, vy, vz, vr);

        // Check game over
        if (gameTimeLeft < 0)
        {
            gameOn = false;
            peoplePicked = false;
            cratePicked = false;
        }

        // Combine the buffer with the original image at the given opacity
        double opacity = 0.4;
        addWeighted(buffer, opacity, result, 1 - opacity, 0, result);

        // Initialise a window with OpenGL support
        namedWindow("AR.Drone", WINDOW_OPENGL);
        // Show the OpenCV image in the "AR.Drone" window
        imshow("AR.Drone", result);
        // Give HighGUI time to process the draw requests
        cvWaitKey(1);

        // Clear the patterns for the next tick
        detectedPattern.clear();
    }

    // Stop processes before quitting
    Stop();
    return 0;
}