void mouseVideo1Callback(int event, int x, int y, int flags, void* param) { static bool clicked=false; IplImage *tmpImage; switch (event) { case CV_EVENT_LBUTTONDOWN: tmpImage=cvClone(frame1); printf("Mouse X, Y: %d, %d \n", x, y); clicked=true; orig1=cvPoint(x,y); break; case CV_EVENT_MOUSEMOVE: if (clicked) { tmpImage=cvClone(frame1); dest1=cvPoint(x,y); cvCircle(tmpImage,orig1, 3, CV_RGB(0,255,0),-1,8,0); cvCircle(tmpImage,dest1,3, CV_RGB(0,255,0),-1,8,0); cvRectangle(tmpImage, orig1, dest1, CV_RGB(0,255,0),1,8,0); cvShowImage("video1", tmpImage); } break; case CV_EVENT_LBUTTONUP: clicked=false; break; } }
/**
 * @brief Applies the Gabor filter pair (real + imaginary kernels) for the
 *        given frequency/orientation and returns the filter magnitude image.
 *
 * @param Image       input image (not modified)
 * @param Frequency   Gabor frequency index passed to CalculateKernel()
 * @param Orientation Gabor orientation; also stored in this->orientation
 * @return newly allocated magnitude image sqrt(real^2 + imag^2);
 *         the caller owns (and must release) it
 */
Img GaborImage::GaborTransform(Img Image, int Frequency, int Orientation)
{
    orientation = Orientation;
    CalculateKernel(Orientation, Frequency);

    Img retImg     = (IplImage*) cvClone(Image);
    Img gabor_real = (IplImage*) cvClone(Image);
    Img gabor_img  = (IplImage*) cvClone(Image);

    /* convolve with the real and imaginary kernels separately */
    cvFilter2D(Image, gabor_real, KernelRealData);  //image.Convolution(this.KernelRealData);
    cvFilter2D(Image, gabor_img,  KernelImgData);   //image.Convolution(this.KernelImgData);

    /* magnitude^2 = real^2 + imag^2, accumulated into retImg */
    cvPow(gabor_real, gabor_real, 2);
    cvPow(gabor_img,  gabor_img,  2);
    cvAdd(gabor_real, gabor_img, retImg);

    /* take the square root via the C++ API; `in` aliases retImg's data */
    cv::Mat in = retImg;
    cv::Mat out;
    cv::sqrt(in, out);
    IplImage dst_img = out;

    cvReleaseImage(&gabor_real);
    cvReleaseImage(&gabor_img);

    /* clone the sqrt result into a standalone image, then release the
       intermediate; the old code reassigned retImg without releasing the
       first clone, leaking one image per call */
    Img result = (IplImage*) cvClone(&dst_img);
    cvReleaseImage(&retImg);
    return result;
}
/** * Mouse callback function that allows user to specify the * initial object region. * Parameters are as specified in OpenCV documentation. */ void icvMouseCallback( int event, int x, int y, int flags, void* param ) { IcvMouseParam* p = (IcvMouseParam*)param; IplImage* clone; static int pressed = false; /* on left button press, remember first corner of rectangle around object */ if( event == CV_EVENT_LBUTTONDOWN ) { p->loc1.x = x; p->loc1.y = y; pressed = true; } /* on left button up, finalize the rectangle and draw it */ else if( event == CV_EVENT_LBUTTONUP ) { p->loc2.x = x; p->loc2.y = y; clone = (IplImage*)cvClone( p->frame ); cvRectangle( clone, p->loc1, p->loc2, CV_RGB(255,255,255), 1, 8, 0 ); cvShowImage( p->win_name, clone ); cvReleaseImage( &clone ); pressed = false; } /* on mouse move with left button down, draw rectangle */ else if( event == CV_EVENT_MOUSEMOVE && flags & CV_EVENT_FLAG_LBUTTON ) { clone = (IplImage*)cvClone( p->frame ); cvRectangle( clone, p->loc1, cvPoint(x, y), CV_RGB(255,255,255), 1, 8, 0 ); cvShowImage( p->win_name, clone ); cvReleaseImage( &clone ); } }
/*
 * Feeds a new video frame into the eye trackers (or runs detection if the
 * trackers are not yet initialized).
 *
 * newFrame: current frame; copied into this->oriImage for later drawing.
 * mask:     optional binary mask; when non-NULL, only masked pixels of the
 *           frame are processed.
 */
void Blink::TrackEyes(IplImage* newFrame, IplImage* mask)
{
    /* keep an unmodified copy of the frame (allocated lazily on first call) */
    if(!oriImage)
        oriImage=(IplImage*)cvClone(newFrame);
    else
        cvCopy(newFrame,oriImage,NULL);

    /* work on a masked copy so the caller's frame is untouched */
    IplImage* temp=(IplImage*)cvClone(newFrame);
    if(mask!=NULL)
        cvAnd(newFrame,mask,temp);

    /* NOTE(review): `temp` is never released before returning — this leaks
       one frame-sized image per call unless getNextLocation()/Detect()
       takes ownership of it; verify those callees before adding a release. */
    if(leftEyeTracker && rightEyeTracker){
        leftEye=leftEyeTracker->getNextLocation(temp);
        rightEye=rightEyeTracker->getNextLocation(temp);
    }
    else{
        Detect(temp);
    }
}
/*
 * Dumps every filter kernel in the bank to OUTPUT_PATH as a
 * contrast-normalized PNG named FILTER_<bandwidth>_<freq>_<orientation>.png.
 * Filters are stored flat in fb->filters in bandwidth/frequency/orientation
 * order, so a running index walks them in step with the three loops.
 */
void output_filter_bank(FilterBank *fb)
{
    int idx = 0;
    char name[256];

    for (int b = 0; b < N_BANDWIDTHS; b++)
        for (int f = 0; f < N_FREQS; f++)
            for (int o = 0; o < N_ORIENTATIONS; o++) {
                sprintf(name, "%s/%s_%02d_%02.2f_%02.2f.png",
                        OUTPUT_PATH, "FILTER",
                        bandwidths[b], spatial_frequencies[f],
                        orientations[o]);
                puts(name);

                /* normalize a scratch copy to 0..255 so the PNG is visible */
                CvMat *scratch = cvClone(fb->filters[idx]->real);
                cvNormalize(scratch, scratch, 255, 0, CV_MINMAX, NULL);
                cvSaveImage(name, scratch, NULL);
                cvReleaseMat(&scratch);
                idx++;
            }
}
/*
 * Runs frame differencing inside `window` and extracts the connected
 * components of the resulting motion mask.
 *
 * The components are returned through `comp`; the return value is their
 * count.  Experimentally derived heuristics elsewhere decide whether the
 * components form an eye pair (callers only proceed when exactly two
 * components are found).
 *
 * Uses the file-level globals `diff`, `kernel` and `storage`.
 */
int get_connected_components(IplImage* img, IplImage* prev, CvRect window,
                             CvSeq** comp)
{
    IplImage* scratch;

    cvZero(diff);

    /* restrict all work to the search window */
    cvSetImageROI(img, window);
    cvSetImageROI(prev, window);
    cvSetImageROI(diff, window);

    /* motion analysis: threshold the frame difference, then open it to
       drop single-pixel noise */
    cvSub(img, prev, diff, NULL);
    cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
    cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);

    /* undo the ROIs before anyone else uses the images */
    cvResetImageROI(img);
    cvResetImageROI(prev);
    cvResetImageROI(diff);

    /* cvFindContours destroys its input, so give it a throwaway copy */
    scratch = (IplImage*)cvClone(diff);
    int count = cvFindContours(scratch, storage, comp, sizeof(CvContour),
                               CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE,
                               cvPoint(0,0));
    cvClearMemStorage(storage);
    cvReleaseImage(&scratch);

    return count;
}
/**
 * @brief Mouse callback that lets the user select the object to track
 * @param event The type of event generated by the mouse
 * @param x The x coordinate of the selected pixel
 * @param y The y coordinate of the selected pixel
 * @param flags Mouse state flags
 * @param param User parameters where the coordinates etc. are stored
 */
void mouse( int event, int x, int y, int flags, void *param )
{
    params *p = (params*)param;
    static int pressed = FALSE;

    /* On left button press, store the first corner of the rectangle
       covering the object */
    if( event == CV_EVENT_LBUTTONDOWN )
    {
        p->loc1.push_back(CvPoint());
        p->loc1[p->n].x = x;
        p->loc1[p->n].y = y;
        pressed = TRUE;
    }
    /* On button release, finalize the rectangle and draw it in black */
    else if( event == CV_EVENT_LBUTTONUP )
    {
        p->loc2.push_back(CvPoint());
        p->loc2[p->n].x = x;
        p->loc2[p->n].y = y;
        /* NOTE(review): if the button is released without any prior
           MOUSEMOVE, cur_img may never have been assigned here — verify the
           caller initializes it to NULL before installing this callback. */
        cvReleaseImage( &(p->cur_img) );
        cvRectangle( first_frame, p->loc1[p->n], p->loc2[p->n],
                     CV_RGB(0,0,0), 1, 8, 0 );
        cvShowImage( win_name, first_frame );
        pressed = FALSE;
        p->n++;
    }
    /* While dragging, keep drawing the rectangle in white on a scratch copy */
    else if( event == CV_EVENT_MOUSEMOVE && flags & CV_EVENT_FLAG_LBUTTON )
    {
        p->cur_img = (IplImage*) cvClone( first_frame );
        cvRectangle( p->cur_img, p->loc1[p->n], cvPoint(x, y),
                     CV_RGB(255,255,255), 1, 8, 0 );
        cvShowImage( win_name, p->cur_img );
        cvReleaseImage( &(p->cur_img) );
    }
}
static void mouseHandler(int event, int x, int y, int flags, void *param) { /* user press left button */ if(event == CV_EVENT_LBUTTONDOWN && !drag) { point = cvPoint(x, y); drag = 1; } /* user drag the mouse */ if(event == CV_EVENT_MOUSEMOVE && drag) { img1 = (IplImage *) cvClone(img0); cvRectangle(img1, point, cvPoint(x, y), CV_RGB(255, 0, 0), 1, 8, 0); cvShowImage(window_name.c_str(), img1); cvReleaseImage(&img1); } /* user release left button */ if(event == CV_EVENT_LBUTTONUP && drag) { *bb = cvRect(point.x, point.y, x - point.x, y - point.y); drag = 0; } }
int main(int argc, char** argv) { CvSeq* comp = 0; CvRect window, eye; int key, nc, found; int text_delay, stage = STAGE_INIT; init(); while (key != 'q') { frame = cvQueryFrame(capture); if (!frame) exit_nicely("cannot query frame!"); frame->origin = 0; if (stage == STAGE_INIT) window = cvRect(0, 0, frame->width, frame->height); cvCvtColor(frame, gray, CV_BGR2GRAY); nc = get_connected_components(gray, prev, window, &comp); if (stage == STAGE_INIT && is_eye_pair(comp, nc, &eye)) { delay_frames(5); cvSetImageROI(gray, eye); cvCopy(gray, tpl, NULL); cvResetImageROI(gray); stage = STAGE_TRACKING; text_delay = 10; } if (stage == STAGE_TRACKING) { found = locate_eye(gray, tpl, &window, &eye); if (!found || key == 'r') stage = STAGE_INIT; if (is_blink(comp, nc, window, eye)){ text_delay = 10; system("/bin/bash ./blinked.sh"); } DRAW_RECTS(frame, diff, window, eye); DRAW_TEXT(frame, "blink!", text_delay, 1); } cvShowImage(wnd_name, frame); cvShowImage(wnd_debug, diff); prev = (IplImage*)cvClone(gray); key = cvWaitKey(15); } exit_nicely(NULL); }
/*
 * Returns a copy of the stored original frame with a rectangle drawn around
 * each currently known eye location, or NULL when no frame has been seen
 * yet.  The caller owns (and must release) the returned image.
 */
IplImage* Blink::MarkAll(CvScalar color)
{
    if (!oriImage)
        return NULL;

    IplImage* marked = (IplImage*) cvClone(oriImage);

    if (leftEye) {
        CvPoint tl = cvPoint(leftEye->x, leftEye->y);
        CvPoint br = cvPoint(leftEye->x + leftEye->width,
                             leftEye->y + leftEye->height);
        cvRectangle(marked, tl, br, color, 1, 8, 0);
    }
    if (rightEye) {
        CvPoint tl = cvPoint(rightEye->x, rightEye->y);
        CvPoint br = cvPoint(rightEye->x + rightEye->width,
                             rightEye->y + rightEye->height);
        cvRectangle(marked, tl, br, color, 1, 8, 0);
    }
    return marked;
}
/*
 * Replaces the image stored in `image_componentes` with a deep copy of
 * `image`, releasing any previously stored image first.
 */
void ImageComponentsSetImage(ImageComponents *image_componentes, IplImage *image)
{
    if (image_componentes->image) {
        cvReleaseImage(&image_componentes->image);
        image_componentes->image = NULL;
    }
    /* explicit cast: cvClone() returns void*, which C++ will not implicitly
       convert to IplImage* (matches the casts used elsewhere in the code) */
    image_componentes->image = (IplImage *) cvClone(image);
}
/*
 * Stores a deep copy of `image` as component `component` (0..2) of
 * `image_componentes`, releasing any image previously held in that slot.
 * Out-of-range indices are ignored (the old code only checked the upper
 * bound, so a negative index indexed out of bounds).
 */
void ImageComponentsSetComponent(ImageComponents *image_componentes, int component, IplImage *image)
{
    if (component >= 0 && component < 3) {
        if (image_componentes->img[component]) {
            cvReleaseImage(&image_componentes->img[component]);
            image_componentes->img[component] = NULL;
        }
        /* explicit cast: cvClone() returns void*, which C++ will not
           implicitly convert to IplImage* */
        image_componentes->img[component] = (IplImage *) cvClone(image);
    }
}
// TODO: member of Gui // --> problem: callback function mouseHandler as member! int getBBFromUser(IplImage *img, CvRect &rect, Gui *gui, const string &message) { window_name = gui->windowName(); cvDestroyWindow(window_name.c_str()); cvNamedWindow(window_name.c_str(), CV_WINDOW_AUTOSIZE); cvMoveWindow(window_name.c_str(), 100, 100); img0 = (IplImage *)cvClone(img); rect = cvRect(-1, -1, -1, -1); bb = ▭ bool correctBB = false; cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8); cvSetMouseCallback(window_name.c_str(), mouseHandler, NULL); cvPutText(img0, message.c_str(), cvPoint(500, 60), &font, cvScalar(255, 255, 0)); cvShowImage(window_name.c_str(), img0); while (!correctBB) { char key = cvWaitKey(0); if (tolower(key) == 'q') { return PROGRAM_EXIT; } if (((key == '\n') || (key == '\r') || (key == '\r\n')) && (bb->x != -1) && (bb->y != -1)) { correctBB = true; } } if (rect.width < 0) { rect.x += rect.width; rect.width = abs(rect.width); } if (rect.height < 0) { rect.y += rect.height; rect.height = abs(rect.height); } cvSetMouseCallback(window_name.c_str(), NULL, NULL); cvReleaseImage(&img0); cvReleaseImage(&img1); return SUCCESS; }
/*
 * Writes one PNG per cluster to OUTPUT_PATH: each file shows the original
 * image with everything outside that cluster's mask blacked out.
 */
void output_classes(CvMat **classes, CvMat *orig)
{
    char path[256];

    for (int c = 0; c < K_CLUSTERS; c++) {
        /* start from an all-black copy, then paint in the masked pixels */
        CvMat *shown = cvClone(orig);
        cvZero(shown);
        cvCopy(orig, shown, classes[c]);

        sprintf(path, "%s/%s_%d.png", OUTPUT_PATH, "class", c);
        cvSaveImage(path, shown, NULL);
        cvReleaseMat(&shown);
    }
}
/*
  Converts an image to 32-bit grayscale

  @param img a 3-channel 8-bit color (BGR) or 8-bit gray image

  @return Returns a 32-bit grayscale image
*/
IplImage* convert_to_gray32( IplImage* img )
{
  IplImage* gray8, * gray32;

  gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );

  /* The old code allocated gray8 unconditionally and then overwrote the
     pointer with cvClone() for single-channel inputs, leaking the first
     allocation.  Allocate only when a conversion is actually needed. */
  if( img->nChannels == 1 )
    gray8 = (IplImage* )cvClone( img );
  else
    {
      gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
      cvCvtColor( img, gray8, CV_BGR2GRAY );
    }

  /* widen to float while scaling 0..255 down to 0..1 */
  cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );

  cvReleaseImage( &gray8 );
  return gray32;
}
/*
  Converts an image to 32-bit grayscale

  @param img a 3-channel 8-bit color (BGR) or 8-bit gray image

  @return Returns a 32-bit grayscale image
*/
static IplImage* convert_to_gray32( IplImage* img )
{
  CvSize size = {img->width, img->height};
  IplImage* gray32 = cvCreateImage( size, IPL_DEPTH_32F, 1 );
  IplImage* gray8;

  /* obtain an 8-bit single-channel version of the input */
  if( img->nChannels != 1 )
  {
    gray8 = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    cvCvtColor( img, gray8, CV_BGR2GRAY );
  }
  else
    gray8 = cvClone( img );

  /* widen to float while scaling 0..255 down to 0..1 */
  cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );
  cvReleaseImage( &gray8 );
  return gray32;
}
IplImage *get_gray(const IplImage *img) { if (!img) { return NULL; } IplImage *gray8, *gray32; gray32 = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1); if (img->nChannels == 1) { gray8 = (IplImage *)cvClone(img); } else { gray8 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); cvCvtColor(img, gray8, CV_BGR2GRAY); } cvConvertScale(gray8, gray32, 1.0 / 255.0, 0); cvReleaseImage(&gray8); return gray32; }
/*
 * Draws the requested light (GREEN/YELLOW/RED) onto a copy of the base
 * traffic-light image and shows it in the window for semaphore `semNum`
 * (1 or 2; anything else shows nothing).
 *
 * NOTE(review): the `sem` parameter is unused — the base image is always
 * the global `semaphore1`; confirm whether `sem` was meant to be cloned.
 */
void changeSemaphore(IplImage *sem, int light, int semNum)
{
    IplImage* lights = (IplImage*)cvClone(semaphore1);

    switch (light) {
    case GREEN:
        cvCircle(lights, cvPoint(200,398), 48, CV_RGB(0,255,0), -1, 8, 0);
        break;
    case YELLOW:
        cvCircle(lights, cvPoint(200,255), 48, CV_RGB(255,255,0), -1, 8, 0);
        break;
    case RED:
        cvCircle(lights, cvPoint(200,112), 48, CV_RGB(255,0,0), -1, 8, 0);
        break;
    }

    if (semNum == 1)
        cvShowImage("semaforo 1", lights);
    else if (semNum == 2)
        cvShowImage("semaforo 2", lights);

    /* cvShowImage copies the pixels, so the scratch image can be released
       here — the old code leaked one clone per call */
    cvReleaseImage(&lights);
}
/*
  Converts an image to 32-bit grayscale

  @param img a 3-channel 8-bit color (BGR) or 8-bit gray image

  @return Returns a 32-bit grayscale image
*/
static IplImage* convert_to_gray32( IplImage* img )
{
  IplImage* gray8, * gray32;

  gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );  /* create the 32-bit single-channel output */

  /* first get an 8-bit single-channel version of the input */
  if( img->nChannels == 1 )  /* input is already single-channel: just clone it */
    gray8 = cvClone( img );
  else  /* input is a 3-channel image */
  {
    gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );  /* create an 8-bit single-channel image */
    cvCvtColor( img, gray8, CV_BGR2GRAY );  /* convert the input to 8-bit grayscale */
  }

  /* then widen gray8 to 32 bits and normalize by dividing by 255 */
  cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );
  cvReleaseImage( &gray8 );  /* release the temporary image */
  return gray32;  /* return the 32-bit single-channel image */
}
//////////////////////////////////////////////////////////////////////////// // MAIN //////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// /// /// Main program. /// int main( int argc, char *argv[] ) { CvCapture *video; CvVideoWriter *writer; IplImage *frame; IplImage *hsv; char *win_name = "Source Frame"; char key = 0; tcp_connection *con; int frame_count = 0, result; int i; max_frames = 5000; unsigned char *byte_stream; // register signal handler for SIGINT und SIGPIPE // the latter occurs if the server terminates the connection /* DEBUG_PRINT( DEBUG_NOTE, "registering signal" ) if ( signal( SIGINT, sig_handler ) == SIG_ERR || signal( SIGPIPE, sig_handler ) == SIG_ERR ) { fprintf( stderr, "failed to register signal handler.\n" ); exit( 1 ); } */ parse_args( argc, argv ); // open the capture source switch ( source ) { case SOURCE_FILE: video = cvCreateFileCapture( infilename ); break; case SOURCE_CAM: video = cvCreateCameraCapture( camera ); break; default: fprintf( stderr, "strange source\n" ); exit( 1 ); } if ( !video ) { fprintf( stderr, "unable to capture source\n" ); exit( 1 ); } // connect to remote host con = tcp_connection_create( host, port ); if ( !con ) { fprintf( stderr, "unable to connect to %s, port %d\n", host, port ); exit( 1 ); } printf( "Connected to %s, port %d.\n", host, port ); frame = cvQueryFrame( video ); if ( !frame ) { fprintf( stderr, "unable to capture video.\n" ); exit( 1 ); } number_of_frames++; if ( netimage_send_header( con, frame ) <= 0 ) { fprintf( stderr, "unable to send header information.\n" ); exit( 1 ); } printf ( "Sending image stream (%d x %d, depth %u, %d channels (size: %d bytes)).\n" "Press 'q' to abort.\n", frame->width, frame->height, frame->depth, frame->nChannels, frame->imageSize ); // open capture file, if desired if ( output ) { strncat( outfilename, ".mpg", MAX_FILENAMELEN ); writer = cvCreateVideoWriter( outfilename, 
atofourcc( fourcc ), fps, cvSize( frame->width, frame->height ), frame->nChannels > 1 ? 1 : 0 ); if ( writer == NULL ) { fprintf( stderr, "unable to create output file '%s'\n", outfilename ); /* exit (1);*/ } else printf( "Writing to output file '%s'.\n", outfilename ); } // for fps measurement struct timeval current, last; unsigned int diff; // time difference in usecs int x0 = 0, y0 = 0, width = 0, height = 0; gettimeofday(&last, NULL); // show first frame for region selection //frame = cvQueryFrame( video ); //cvNamedWindow( win_name, 1 ); //cvShowImage( win_name, frame ); //if (output) // cvWriteFrame( writer, frame ); // get input region char* win_name2 = "First frame"; params p; CvRect* r; int x1, y1, x2, y2; int region[4]; /* use mouse callback to allow user to define object regions */ /*p.win_name = win_name2; p.orig_img = cvClone( frame ); p.cur_img = NULL; p.n = 0; cvNamedWindow( win_name2, 1 ); cvShowImage( win_name2, frame ); cvSetMouseCallback( win_name2, &mouse, &p ); printf("\nSelect Object and press [ENTER] !\n"); cvWaitKey( 0 ); cvDestroyWindow( win_name2 ); cvReleaseImage( &p.orig_img ); if( p.cur_img ) cvReleaseImage( &(p.cur_img) );*/ /* extract regions defined by user; store as an array of rectangles */ if( p.n > 0 ){ p.loc1[0].x = 0; p.loc1[0].y = 0; p.loc2[0].x = 0; p.loc2[0].y = 0; r = malloc( sizeof( CvRect ) ); x1 = MIN( p.loc1[0].x, p.loc2[0].x ); x2 = MAX( p.loc1[0].x, p.loc2[0].x ); y1 = MIN( p.loc1[0].y, p.loc2[0].y ); y2 = MAX( p.loc1[0].y, p.loc2[0].y ); width = x2 - x1; height = y2 - y1; /* ensure odd width and height */ width = ( width % 2 )? width : width+1; height = ( height % 2 )? 
height : height+1; r[0] = cvRect( x1, y1, width, height ); x0 = x1 + width/2; y0 = y1 + height/2; region[0] = x0; region[1] = y0; region[2] = width; region[3] = height; } //printf("\nx = %d\ny = %d\nwidth = %d\nheight = %d\n\n\n", x0, y0, width, height); // 1) convert bgr frame to hsv frame hsv = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 3 ); bgr2hsv(frame, hsv); result = tcp_send( con, hsv->imageData, hsv->imageSize); int counter = 1; //result = tcp_send( con, frame->imageData, frame->imageSize ); result = tcp_send( con, (char *) region, 4 * sizeof(int)); // confirm input with enter //printf("\nPress [ENTER]\n"); //cvWaitKey( 0 ); // TODO: send first frame + region data //result = tcp_send( con, frame->imageData, frame->imageSize); number_of_particles = 100; // create particles data array particles_data = (particle_data *) malloc ((number_of_particles+1) * sizeof(particle_data)); // quiet mode: no video output if (quiet){ cvDestroyWindow( win_name ); } // 1) send other frames // get video data and send/store it #ifdef STORE_VIDEO //while ( ( frame = cvQueryFrame( video ) ) && ( char ) key != 'q' && !quit && number_of_frames <= 221 ) { while ( ( frame = cvQueryFrame( video ) ) && ( char ) key != 'q' && counter < max_frames) { #else while ( ( frame = cvQueryFrame( video ) ) && ( char ) key != 'q' && counter < max_frames) { #endif // 1) convert bgr frame to hsv frame bgr2hsv(frame, hsv); result = tcp_send( con, hsv->imageData, hsv->imageSize ); //fprintf(stderr, "\n/////////////////// number of frames %d //////////////////////////////////////", number_of_frames); counter ++; //result = tcp_send( con, frame->imageData, frame->imageSize ); if ( result > 0 ) { if ( !quiet ) { cvNamedWindow( win_name, 1 ); #ifdef NO_VGA_FRAMEBUFFER #ifndef STORE_VIDEO if (number_of_frames > 2){ #else if (number_of_frames > 2 && number_of_frames % MAX_FRAMES == 0){ #endif // receive tcp package with particle data and display them in video // 1) receive tcp package with particles data 
//printf("\nreceive particles..."); result = tcp_receive( con, (unsigned char*)particles_data, ((number_of_particles+1) * sizeof(particle_data))); if ( result > 0 ) { // 2) draw particles data for (i=0; i<number_of_particles+1; i++){ // set OpenCV location points for bounding boxes loc1.x = ntohl(particles_data[i].x1); loc1.y = ntohl(particles_data[i].y1); loc2.x = ntohl(particles_data[i].x2); loc2.y = ntohl(particles_data[i].y2); particles_data[i].best_particle = ntohl(particles_data[i].best_particle); if (particles_data[i].best_particle > 0 ) particles_data[i].best_particle = TRUE; if (loc1.x <640 && loc2.x < 640 && loc1.y < 480 && loc2.y < 480) // draw bounding box (red for best particle, blue else) if (particles_data[i].best_particle == FALSE){ cvRectangle( frame, loc1, loc2, CV_RGB(0,0,255), 1, 8, 0 ); } else { cvRectangle( frame, loc1, loc2, CV_RGB(255,0,0), 1, 8, 0 ); } } } } #endif if (!quiet){ cvShowImage( win_name, frame ); //export_frame( frame, number_of_frames); key = cvWaitKey( 2 ); } } if ( output ) cvWriteFrame( writer, frame ); } else { printf( "connection lost.\n" ); break; } //gettimeofday(¤t, NULL); //diff = (current.tv_sec - last.tv_sec) * 1000000; //diff += (current.tv_usec - last.tv_usec); //fprintf(stderr, "FPS: %.2f\r", 1000000.0 / diff); //last.tv_sec = current.tv_sec; //last.tv_usec = current.tv_usec; number_of_frames++; } /* #ifdef STORE_VIDEO cvReleaseCapture( &video ); // 2) receive particle data and display particle data as Bounding Boxes switch ( source ) { case SOURCE_FILE: video = cvCreateFileCapture( infilename ); break; case SOURCE_CAM: fprintf( stderr, "This part is only possible if video is stored into a file\n" ); exit( 1 ); default: fprintf( stderr, "strange source\n" ); exit( 1 ); } particles_data = malloc ((number_of_particles+1) * sizeof(particle_data)); // get frames while ( ( frame = cvQueryFrame( video ) ) && ( char ) key != 'q' && !quit && number_of_frames <= 221 ) { // 1) receive tcp package with particles data // 
TODO result = tcp_receive( con, (char *)particles_data, ((number_of_particles+1) * sizeof(particle_data))); if ( result > 0 ) { // 2) draw particles data for (i=0; i<number_of_particles+1; i++){ // set OpenCV location points for bounding boxes loc1.x = particles_data[i].x1; loc1.y = particles_data[i].y1; loc2.x = particles_data[i].x2; loc2.y = particles_data[i].y2; // draw bounding box (red for best particle, blue else) if (particles_data[i].best_particle == TRUE){ cvRectangle( frame, loc1, loc2, CV_RGB(255,0,0), 1, 8, 0 ); } else { cvRectangle( frame, loc1, loc2, CV_RGB(0,0,255), 1, 8, 0 ); } } } // display video frame if (!quiet){ cvNamedWindow( win_name, 1 ); cvShowImage( win_name, frame ); key = cvWaitKey( 2 ); } number_of_frames++; } #endif */ // clean up if (!quiet){ cvDestroyWindow( win_name ); } cvReleaseCapture( &video ); if ( output ) cvReleaseVideoWriter( &writer ); tcp_connection_destroy( con ); return 0; } /* Mouse callback function that allows user to specify the initial object regions. Parameters are as specified in OpenCV documentation. 
*/ void mouse( int event, int x, int y, int flags, void* param ) { params* p = (params*)param; CvPoint* loc; int n; IplImage* tmp; static int pressed = 0; /* on left button press, remember first corner of rectangle around object */ if( event == CV_EVENT_LBUTTONDOWN ) { n = p->n; if( n == 1 ) return; loc = p->loc1; loc[n].x = x; loc[n].y = y; pressed = 1; } /* on left button up, finalize the rectangle and draw it in black */ else if( event == CV_EVENT_LBUTTONUP ) { n = p->n; if( n == 1 ) return; loc = p->loc2; loc[n].x = x; loc[n].y = y; cvReleaseImage( &(p->cur_img) ); p->cur_img = NULL; cvRectangle( p->orig_img, p->loc1[n], loc[n], CV_RGB(0,0,0), 1, 8, 0 ); cvShowImage( p->win_name, p->orig_img ); pressed = 0; p->n++; } /* on mouse move with left button down, draw rectangle as defined in white */ else if( event == CV_EVENT_MOUSEMOVE && flags & CV_EVENT_FLAG_LBUTTON ) { n = p->n; if( n == 1 ) return; tmp = cvClone( p->orig_img ); loc = p->loc1; cvRectangle( tmp, loc[n], cvPoint(x, y), CV_RGB(255,255,255), 1, 8, 0 ); cvShowImage( p->win_name, tmp ); if( p->cur_img ) cvReleaseImage( &(p->cur_img) ); p->cur_img = tmp; } }
int main( int argc, char** argv ) { CvSeq* comp = 0; CvRect window, eye; int key, nc, found; int stage = STAGE_INIT; key=0; startpos_x = 0; startpos_y = 0; search_window_x=0,search_window_y=0; /* Initialize Capture from webcam * Here '0' in cvCaptureCAM indicates the Index of the camera to be used. */ capture = cvCaptureFromCAM(0); if (!capture) exit_nicely("Webcam Not found!"); cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 300); cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 250); // Grabs and returns a frame from the camera. frame = cvQueryFrame(capture); if (!frame) exit_nicely("Cannot query frame!"); // Create a window named 'Video' with window size Normal. cvNamedWindow("video",CV_WINDOW_NORMAL); /* * Creates Windows Handler for keeping the frame window * always on top of other windows. */ HWND win_handle = FindWindow(0, "video"); if (!win_handle) { printf("Failed FindWindow\n"); } SetWindowPos(win_handle, HWND_TOPMOST, 0, 0, 0, 0, 1); ShowWindow(win_handle, SW_SHOW); // Create a callback to mousehandler when mouse is clicked on frame. 
cvSetMouseCallback( "video", mouseHandler,NULL ); // cvCreateImage is used to create & allocate image data nose_template = cvCreateImage( cvSize( NOSE_TPL_WIDTH, NOSE_TPL_HEIGHT ),frame->depth, frame->nChannels ); nose_template_match = cvCreateImage( cvSize( BOUNDARY_WINDOW_WIDTH - NOSE_TPL_WIDTH + 1, BOUNDARY_WINDOW_HEIGHT - NOSE_TPL_HEIGHT + 1 ),IPL_DEPTH_32F, 1 ); // Initialize Memory for storing the frames storage = cvCreateMemStorage(0); if (!storage) exit_nicely("Cannot allocate memory storage!"); /* Allocates and Fills the structure IplConvKernel , which can be used as a structuring element in the morphological operations */ kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL); gray = cvCreateImage(cvGetSize(frame), 8, 1); prev = cvCreateImage(cvGetSize(frame), 8, 1); diff = cvCreateImage(cvGetSize(frame), 8, 1); eye_template = cvCreateImage(cvSize(EYE_TPL_WIDTH, EYE_TPL_HEIGHT), 8, 1); // Show if any error occurs during allocation if (!kernel || !gray || !prev || !diff || !eye_template) exit_nicely("System error."); gray->origin = frame->origin; prev->origin = frame->origin; diff->origin = frame->origin; // Loop until ESC(27) key is pressed while( key != 27 ) { // Get a frame from CAM frame = cvQueryFrame( capture ); /* Always check if frame exists */ if( !frame ) break; // Flip the frame vertically cvFlip( frame, frame, 1 ); frame->origin = 0; // Eye blink detection & tracking if (stage == STAGE_INIT) window = cvRect(0, 0, frame->width, frame->height); // Convert original image to thresholded(grayscale) image for efficient detection*/ cvCvtColor(frame, gray, CV_BGR2GRAY); // Find connected components in the image nc = get_connected_components(gray, prev, window, &comp); // Check if eyes are detected and start tracking by setting Region of Interest(ROI) if (stage == STAGE_INIT && is_eye_pair(comp, nc, &eye)) { cvSetImageROI(gray, eye); cvCopy(gray, eye_template, NULL); cvResetImageROI(gray); // Start tracking eyes for blink stage = 
STAGE_TRACKING; } // Here the tracking will start & will check for eye blink if (stage == STAGE_TRACKING) { // Try to locate the eye found = locate_eye(gray, eye_template, &window, &eye); // If eye is not found here or 'r' is pressed, restart the eye detection module if (!found || key == 'r') stage = STAGE_INIT; DRAW_RECTS(frame, diff, window, eye); // Check if there was an eye blink if (is_blink(comp, nc, window, eye)) { //DRAW_RECTS(frame, diff, window, eye); printf("Eye Blinked!"); // Perform mouse left button click on blink mouse_event(MOUSEEVENTF_LEFTDOWN | MOUSEEVENTF_LEFTUP, 0,0,0,0); } } prev = (IplImage*)cvClone(gray); /* Perform nose tracking if template is available Here tracking will start & continues till selected templated is within specified threshold limit */ if( is_tracking ) trackObject(); // Display the frame in window cvShowImage( "video", frame ); // Check for a key press key = cvWaitKey( 10 ); } // Exit without any error exit_nicely(NULL); }
/*
 * Interactively tunes the value-channel threshold used to segment the pegs.
 *
 * Shows `img` with the current segmentation applied; the user moves a
 * trackbar until exactly 12 peg contours are found inside ROI `r`, then
 * confirms with Enter.  The final mask is returned through `fgMaskPeg`.
 *
 * img          input BGR frame (cloned; not modified)
 * gui          supplies the window name
 * message      instruction text overlaid on the preview
 * pegThreshVal initial trackbar/threshold value
 * r            rectangular ROI outside of which the mask is zeroed
 * fgMaskPeg    out: binary mask of the segmented pegs
 * returns      the threshold value selected by the user
 */
int getPegthresholdFromUser(IplImage *img, Gui *gui, string message, int pegThreshVal, Rect r, cv::Mat &fgMaskPeg)
{
    cv::Mat element[1];
    int count = 0;
    // elliptical structuring element for the open/close-style cleanup below
    element[0] = getStructuringElement(MORPH_ELLIPSE, Size(8, 8), Point(0, 0));

    window_name = gui->windowName();
    cvDestroyWindow(window_name.c_str());
    cvNamedWindow(window_name.c_str(), CV_WINDOW_AUTOSIZE);
    cvMoveWindow(window_name.c_str(), 100, 100);

    img0 = (IplImage *)cvClone(img);
    char TrackbarName[50];
    sprintf(TrackbarName, "thresh x %d", slider_max);
    slider_val = pegThreshVal;
    createTrackbar(TrackbarName, window_name, &slider_val, slider_max, 0);

    Mat src, im1, im3;
    src = Mat(img0);
    im1 = Mat::zeros(src.size(), src.type());
    // threshold on the V channel of HSV
    cvtColor(src, im3, CV_BGR2HSV);

    vector<vector<Point> > pegsI;
    // loop until the user confirms a threshold that yields exactly 12 pegs
    while (1)
    {
        pegsI.clear();
        Mat channel[3];
        split(im3, channel);

        //Mat fgMaskRing;
        inRange(channel[2], slider_val, 255, fgMaskPeg);

        // ROI: zero out everything outside the rectangle r
        for (int y = 0; y < fgMaskPeg.rows; y++)
        {
            for (int x = 0; x < fgMaskPeg.cols; x++)
            {
                if (!(x >= r.tl().x && x <= r.br().x && y >= r.tl().y && y <= r.br().y))
                {
                    fgMaskPeg.at<uchar>(Point(x, y)) = 0;
                }
            }
        }

        // two rounds of erode+dilate to drop speckle noise in the mask
        erode(fgMaskPeg, fgMaskPeg, element[0]);
        dilate(fgMaskPeg, fgMaskPeg, element[0]);
        erode(fgMaskPeg, fgMaskPeg, element[0]);
        dilate(fgMaskPeg, fgMaskPeg, element[0]);

        //p.copyTo(p, fgMaskPeg);
        // build the preview: masked pixels keep their colour, rest black
        for (int y = 0; y < src.rows; y++)
        {
            for (int x = 0; x < src.cols; x++)
            {
                if (fgMaskPeg.at<uchar>(Point(x, y)))
                {
                    im1.at<Vec3b>(Point(x, y)) = src.at<Vec3b>(Point(x, y));
                }
                else
                {
                    im1.at<Vec3b>(Point(x, y)) = Vec3b(0,0,0);
                }
            }
        }

        // findContours modifies its input, so work on a copy of the mask
        Mat mask = fgMaskPeg.clone();
        vector<Vec4i> hierarchy_ring;

        //imshow("Initial mask", initial_ring_mask);
        findContours(mask, pegsI, hierarchy_ring, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        count = pegsI.size();
        cout << "count Pegs->" << count << endl;

        cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8);
        putText(im1, message.c_str(), cvPoint(0, 60), CV_FONT_HERSHEY_SIMPLEX, .7, Scalar(255, 255, 0), 1);
        imshow(window_name.c_str(), im1);

        char key = cvWaitKey(40);
        /* NOTE(review): '\r\n' is a multi-character literal (an
           implementation-defined int) and can never equal a char — only the
           '\r' and '\n' comparisons ever fire. */
        if ((key == '\r' || key == '\n' || key == '\r\n'))
        {
            // only accept the threshold when exactly 12 pegs are segmented
            if (count == 12)
            {
                break;
            }
        }
        count = 0;
    }

    cvReleaseImage(&img0);
    return slider_val;
}
/*
 * Texture-segmentation pipeline: loads the input image, converts it to Lab,
 * applies a Gabor filter bank per channel, clusters the filter responses
 * with k-means, and writes the originals, per-class masks and coloured
 * label images to OUTPUT_PATH.
 */
int main(int argc, char** argv)
{
    // Load and display original image
    puts("Loading image...");
    CvMat* img = cvLoadImageM(PATH, CV_LOAD_IMAGE_COLOR);
    CvMat* orig = cvCloneMat(img);
    // work in Lab so chroma channels separate from lightness
    cvCvtColor(img, img, CV_BGR2Lab);
    if (SMOOTH_ORIGINAL) {
        cvSmooth(img, img, CV_GAUSSIAN, SMOOTH_ORIGINAL, 0, 0, 0);
    }
    //chromacity(img);
    //show(ORIGINAL_IMAGE_WINDOW_NAME, orig);
    //show(PRETREATED_IMAGE_WINDOW_NAME, img);

    // Generate a Gabor filter bank
    puts("Generating Gabor filter bank...");
    FilterBank filter_bank;
    generate_gabor_filter_bank(&filter_bank, N_BANDWIDTHS, bandwidths, N_FREQS, spatial_frequencies, N_ORIENTATIONS, orientations);

    // Separate each channel
    puts("Separating channels...");
    CvMat *ch1 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    CvMat *ch2 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    CvMat *ch3 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    cvSplit(img, ch1, ch2, ch3, NULL);

    // Apply the filter bank on each one of them
    // (results holds all responses contiguously: ch1's, then ch2's, then ch3's)
    puts("Applying filters...");
    CvMat **results = (CvMat**) malloc(3 * filter_bank.size * sizeof (CvMat*));
    CvMat **filtered_channel_1 = results;
    apply_filter_bank(&filter_bank, ch1, filtered_channel_1);
    CvMat **filtered_channel_2 = results + filter_bank.size;
    apply_filter_bank(&filter_bank, ch2, filtered_channel_2);
    CvMat **filtered_channel_3 = results + 2 * filter_bank.size;
    apply_filter_bank(&filter_bank, ch3, filtered_channel_3);

    // Now sort the samples
    // (when IGNORAR_L is set, the L-channel responses are skipped by
    // pointing `results` past them and using only 2 channels)
    puts("Sorting...");
    int n_channels = (IGNORAR_L ? 2 : 3);
    results = (IGNORAR_L ? filtered_channel_2 : results);
    CvMat *samples;
    sort_samples(n_channels * filter_bank.size, results, &samples);
    printf("Samples: %d(x%d)", samples->rows, samples->cols);
    fflush(stdout);

    // And cluster them
    printf("Clustering... ");
    CvScalar color_tab[8];  // one display colour per possible cluster
    color_tab[0] = CV_RGB(255, 0, 0);
    color_tab[1] = CV_RGB(0, 255, 0);
    color_tab[2] = CV_RGB(0, 0, 255);
    color_tab[3] = CV_RGB(0, 255, 255);
    color_tab[4] = CV_RGB(255, 0, 255);
    color_tab[5] = CV_RGB(255, 255, 0);
    color_tab[6] = CV_RGB(255, 255, 255);
    color_tab[7] = CV_RGB(0, 0, 0);
    CvMat *labels = cvCreateMat(samples->rows, 1, CV_32SC1);
    cvKMeans2(samples, K_CLUSTERS, labels, cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0), 10, NULL, 0, NULL, NULL);
    puts("done");
    fflush(stdout);

    // one binary mask per cluster plus a coloured label image
    CvMat *color_labels = cvCreateMat(img->rows, img->cols, CV_8UC3);
    CvMat **classes = malloc(K_CLUSTERS * sizeof (CvMat*));
    for (int i = 0; i < K_CLUSTERS; i++) {
        classes[i] = cvCreateMat(img->rows, img->cols, CV_8UC1);
        cvZero(classes[i]);
    }
    img_from_labels(labels, classes, color_labels, color_tab);
    //show("Labels", labeled_img);

    // blend the original with the coloured labels for a quick visual check
    CvMat *mix = cvClone(img);
    cvAddWeighted(orig, 0.7, color_labels, 0.3, 0, mix);

    // puts("Outputting...");
    char out_file_name[256];
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "original");
    cvSaveImage(out_file_name, orig, NULL);
    output_base_channels(img);
    if (!IGNORAR_L) {
        output_filtered_images("CH1", filter_bank.size, filtered_channel_1);
    }
    output_filtered_images("CH2", filter_bank.size, filtered_channel_2);
    output_filtered_images("CH3", filter_bank.size, filtered_channel_3);
    output_filter_bank(&filter_bank);

    // output labels
    output_classes(classes, orig);

    // output colored and mix
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "coloured");
    cvSaveImage(out_file_name, color_labels, NULL);
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "mix");
    cvSaveImage(out_file_name, mix, NULL);
    //show("Mix", mix);
    // cvWaitKey(0);
    // cvWaitKey(0);
    // cvWaitKey(0);

    // Should do some cleanup here... :_(
    return (EXIT_SUCCESS);
}
int main (void) { MyFreenectDevice * freenect; Freenect::Freenect freeNect; IplImage * tmp2 = cvCreateImage(cvSize(640, 480), 8, 3); IplImage * tmp = cvCreateImage(cvSize(800, 600), 8, 3); CvFont font; int selected = 1; Menu * menu = new Menu(5); cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5); freenect = &freeNect.createDevice<MyFreenectDevice>(0); cvNamedWindow("fingers", CV_WINDOW_NORMAL); cvSetWindowProperty("fingers", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN); cvNamedWindow("surface"); cvResizeWindow("fingers", 800, 600); cvSet(tmp, CV_RGB(255, 255, 255)); cvMoveWindow("fingers", 0, 1050); cvMoveWindow("surface", 0, 0); cvSet(tmp, CV_RGB(255, 255, 255)); cvShowImage("fingers", tmp); cvNamedWindow("rgb"); cvMoveWindow("rgb", 0 , 480); cvNamedWindow("depth"); cvMoveWindow("depth", 640 , 480); freenect->startStream(); freenect->setTilt(0); freenect->setThresh(true, 0, 700); int waitTime = 0; while (freenect->isKinectRunning()) { if (freenect->isDepthReady() && freenect->isRgbReady()) { cvSet(tmp, CV_RGB(255, 255, 255)); cvZero(tmp2); IplImage * img = (IplImage*)cvClone(freenect->fetchKinectRgbFrame()); if (freenect->isCalibrated() == false && !freenect->getSurface()) { sleep(1); freenect->calibrate(); } else if (freenect->isCalibrated() == false) { if (waitTime < 30) { cvPutText(tmp, "Initiating manual calibration", cvPoint(250, 200), &font, CV_RGB(255, 0 , 0)); cvPutText(tmp, "due to bad lighting conditions", cvPoint(250, 300), &font, CV_RGB(255, 0 , 0)); ++waitTime; } } else { cvSet(tmp, CV_RGB(0, 0, 0)); freenect->calcFingerPositions(); std::list<Finger> fList = freenect->getFingerList(); std::list<CvPoint> fListRaw = freenect->getRawFingerList(); for (std::list<CvPoint>::iterator it = fListRaw.begin() ; it != fListRaw.end() ; ++it) { cvCircle(freenect->fetchKinectDepthFrame(), cvPoint(it->x, it->y), 10, CV_RGB(0, 255, 0), CV_FILLED); } std::vector<CvPoint> twoFirst; for (std::list<Finger>::iterator it = fList.begin() ; it != fList.end() ;) { 
cvCircle(tmp, cvPoint(it->x, it->y), 10, CV_RGB(255, 0, 0), CV_FILLED); ++it; } menu->interpretGesture(fList); } menu->drawMenu(tmp); cvShowImage("fingers", tmp); cvShowImage("surface", tmp2); cvShowImage("rgb", freenect->fetchKinectRgbFrame()); cvShowImage("depth", freenect->fetchKinectDepthFrame()); cvReleaseImage(&img); } freenect->update(); int k = freenect->getKey(); if (k == 27) freenect->setRunning(false); } freenect->stopStream(); cvDestroyAllWindows(); exit(0); return 0; }
int main( int argc, char* argv[]){ char c; int fileFlag = 0, psfFlag = 0, errFlag = 0, gpuFlag = 0, kernelSize = 2, blurFlag = 0, ux = 0, uy = 0; float stddev = 1.0f; double snr = 0.005; char *filename, *psfname; extern char *optarg; extern int optind, optopt, opterr; while ((c = getopt(argc, argv, ":bgk:s:d:f:p:x:y:")) != -1) { switch(c) { case 'b': printf("Blur image first\n"); blurFlag = 1; break; case 'g': printf("Use GPU Kernel\n"); gpuFlag = 1; break; case 'k': kernelSize = atoi(optarg); printf("Kernel size: %d\n", kernelSize); break; case 's': snr = atof(optarg); printf("Singal-to-noise ratio: %f\n", snr); break; case 'd': stddev = atof(optarg); printf("Kernel stddev: %f\n", stddev); break; case 'f': filename = optarg; fileFlag = 1; printf("Processing file: %s\n", filename); break; case 'p': psfname = optarg; psfFlag = 1; printf("Kernel image: %s\n", psfname); break; case 'x': ux = atoi(optarg); printf("Offset X: %d\n", ux); break; case 'y': uy = atoi(optarg); printf("Offset Y: %d\n", uy); break; case ':': printf("-%c without input\n", optopt); errFlag++; break; case '?': printf("unknown arg %c\n", optopt); errFlag++; break; } } if (errFlag || !fileFlag) { goto ERROR; } IplImage* img; IplImage* srcImg = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR); if (!srcImg) goto ERROR; int side = max(srcImg->width, srcImg->height) + kernelSize * 2; if (srcImg->height != srcImg->width) { CvSize size = cvSize(side, side); img = cvCreateImage(size, IPL_DEPTH_8U, 3); CvPoint offset = cvPoint((side - srcImg->width) / 2, (side - srcImg->height) / 2); cvCopyMakeBorder(srcImg, img, offset, IPL_BORDER_REPLICATE, cvScalarAll(0)); } else { img = srcImg; } IplImage* imgSplit[3]; for(int c = 0; c < 3; ++c) imgSplit[c] = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); cvSplit(img, imgSplit[0], imgSplit[1], imgSplit[2], NULL); printf("Height: %d, Width: %d\n", img->height, img->width); double * psf = (double *) malloc (sizeof(double) * img->width * img->height); for (int i = 0; i < 
img->width * img->height; i++) psf[i] = 0.; if(!img || !psf) return 1; double scale = 0; // cyclic if (psfFlag) { IplImage *kernelImage = cvLoadImage(psfname, CV_LOAD_IMAGE_GRAYSCALE); if (!kernelImage) { goto ERROR; } scale = readPSF(psf, kernelImage, img->height, img->width); } else { scale = genPSF(psf, img->height, img->width, kernelSize, stddev, ux, uy); } IplImage* psfImg = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 1); for(int h = 0 ; h < img->height; ++h){ for( int w = 0; w < img->width; ++w){ IMG_ELEM_DOUBLE(psfImg, h, w) = psf[h * img->width + w]; } } if (blurFlag) { imgSplit[0] = blurPSF(imgSplit[0], psfImg); imgSplit[1] = blurPSF(imgSplit[1], psfImg); imgSplit[2] = blurPSF(imgSplit[2], psfImg); cvMerge(imgSplit[0], imgSplit[1], imgSplit[2], NULL, img); } IplImage* dbl1; IplImage* dbl2; IplImage* dbl3; if (gpuFlag) { dbl1 = deblurGPU(imgSplit[0], psfImg, snr, scale); dbl2 = deblurGPU(imgSplit[1], psfImg, snr, scale); dbl3 = deblurGPU(imgSplit[2], psfImg, snr, scale); } else { dbl1 = deblurFilter(imgSplit[0], psfImg, snr, scale); dbl2 = deblurFilter(imgSplit[1], psfImg, snr, scale); dbl3 = deblurFilter(imgSplit[2], psfImg, snr, scale); } IplImage* dbl = cvClone(img); cvMerge(dbl1, dbl2, dbl3, NULL, dbl); char psfFile[256], blurFile[256], deblurFile[256]; char *pch = strchr(filename, '.'); (*pch) = '\0'; if (blurFlag) { snprintf(psfFile, 250, "%s_psf.png", filename); snprintf(blurFile, 250, "%s_blur.png", filename); } if (psfFlag) { snprintf(deblurFile, 250, "%s_%2.4f_deblur.png", filename, snr); } else { snprintf(deblurFile, 250, "%s_%d_%2.2f_%2.4f_%d_%d_deblur.png", filename, kernelSize, stddev, snr, ux, uy); } // ROI IplImage* blurROI; IplImage* deblurROI; CvRect rect; rect.x = (side - srcImg->width) / 2; rect.y = (side - srcImg->height) / 2; rect.width = srcImg->width; rect.height = srcImg->height; if (blurFlag) { cvSetImageROI(img, rect); blurROI = cvCloneImage(img); cvSaveImage(psfFile, psfImg, 0); cvSaveImage(blurFile, blurROI, 0); } 
cvSetImageROI(dbl, rect); deblurROI = cvCloneImage(dbl); cvSaveImage(deblurFile, deblurROI, 0); cvReleaseImage(&imgSplit[0]); cvReleaseImage(&imgSplit[1]); cvReleaseImage(&imgSplit[2]); cvReleaseImage(&psfImg); cvReleaseImage(&img); cvReleaseImage(&dbl); cvReleaseImage(&dbl1); cvReleaseImage(&dbl2); cvReleaseImage(&dbl3); return 0; ERROR: fprintf(stderr, "Usage: -f [/path/to/image] path to the image file\n"); fprintf(stderr, " -p [/path/to/kernel] path to the kernel\n"); fprintf(stderr, " -k [2] kernel size\n"); fprintf(stderr, " -s [0.005] signal-to-noise ratio\n"); fprintf(stderr, " -d [1.0] standard deviation\n"); fprintf(stderr, " -x [0] center offset x\n"); fprintf(stderr, " -y [0] center offset y\n"); fprintf(stderr, " -g use GPU kernel\n"); fprintf(stderr, " -b blur image first\n"); return 1; }