Example #1
0
int detect_multiple1( IplImage* img, CvRect* found_obj[], CvSize sz, int feature  )
{
	/* Runs the Haar cascade for `feature` and writes up to MAX_RECT hits
	 * into found_obj, with coordinates scaled back to the original image.
	 * Protocol (per frame): a FACE call must come first -- it builds the
	 * downscaled working image and records face_rect -- then sub-feature
	 * calls (NOSE/MOUTH/eyes/...) search only inside that face ROI, and
	 * the final EYE call frees the working images.
	 * Returns the number of rectangles written into found_obj. */
	static CvRect face_rect;
	/* BUG FIX: gray/small_img were plain (auto) locals, reset to NULL on
	 * every call.  The non-FACE path below sets an ROI on the small_img
	 * that a previous FACE call created, so it dereferenced a NULL image,
	 * and the "release if non-NULL" guards were dead code.  `static`
	 * makes the images persist between calls as the logic requires. */
	static IplImage *gray = NULL, *small_img = NULL;
	int tot = 0;

	/* Select the cascade for the requested feature (globals loaded elsewhere). */
	switch (feature) {
		case FACE:
			cascade = face_cascade;
			break;
		case NOSE:
			cascade = nose_cascade;
			break;
		case MOUTH:
			cascade = mouth_cascade;
			break;
		case EYE:	/* fallthrough: generic eye uses the left-eye cascade */
		case LEYE:
			cascade = leye_cascade;
			break;
		case REYE:
			cascade = reye_cascade;
			break;
		case PROFILE_FACE:
			cascade = profile_face_cascade;
			break;
		default:
			cascade = face_cascade;
			break;
	}

	if (feature == FACE) {
		/* New frame: drop last frame's working images and rebuild them. */
		if (gray) {
			cvResetImageROI(gray);
			cvReleaseImage(&gray);		/* cvReleaseImage NULLs the pointer */
		}
		if (small_img) {
			cvResetImageROI(small_img);
			cvReleaseImage(&small_img);
		}
		gray = cvCreateImage( cvSize(img->width, img->height), 8, 1 );
		small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
										  cvRound (img->height/scale)), 8, 1 );

		cvCvtColor( img, gray, CV_BGR2GRAY );
		cvResize( gray, small_img, CV_INTER_LINEAR );
		cvEqualizeHist( small_img, small_img );
	}
	else {
		/* Robustness: a sub-feature call before any FACE call has no
		 * working image to search in -- report no detections. */
		if (!small_img)
			return 0;
		/* Restrict the search to the face found by the previous FACE call. */
		cvSetImageROI(small_img, face_rect);
	}

	cvClearMemStorage( storage );

	CvSeq* objects = cvHaarDetectObjects( small_img, cascade, storage,
										 1.1, 2, 0, sz );

	tot = objects->total;
	int i = 0;
	if (objects->total > 0) {
		if (tot > MAX_RECT) tot = MAX_RECT;
		for (i = 0; (i < tot) && (i < MAX_RECT); i++) {
			CvRect* r = (CvRect*)cvGetSeqElem( objects, i );

			if (feature == FACE) {
				if (i == 0)	/* remember the first face for later ROI calls */
					face_rect = cvRect(r->x, r->y, r->width, r->height);
			}
			else {
				/* ROI detections are relative to face_rect; shift back to
				 * small-image coordinates before upscaling. */
				if (face_rect.width > 0 && face_rect.height > 0) {
					r->x += face_rect.x;
					r->y += face_rect.y;
				}
			}

			found_obj[i]->x = (int)((double)r->x * scale);
			found_obj[i]->y = (int)((double)r->y * scale);
			found_obj[i]->width = (int)((double)r->width * scale);
			/* NOTE(review): height is only rescaled for FACE; sub-features
			 * keep the small-image height while x/y/width are upscaled.
			 * Preserved as-is, but this looks inconsistent -- confirm. */
			found_obj[i]->height = (int)((double)r->height);
			if (feature == FACE)
				found_obj[i]->height = (int)((double)r->height * scale);
		}
	}

	if (feature != FACE)
		cvResetImageROI(small_img);
	/* Eyes are queried last for a frame: free the working images. */
	if (feature == EYE) {
		cvReleaseImage(&gray);
		cvReleaseImage(&small_img);
	}
	return tot;
}
Example #2
0
void GetImageData(int w,int h,int bpp,int channels,unsigned char *rawArray)
{
    /* Grabs one frame from the camera into rawArray, converting bit depth
     * (8<->16), optionally debayering to 3 channels, and resizing to w x h.
     * NOTE(review): rawArray is assumed large enough for the largest
     * intermediate (w*h*2 bytes for 16-bit, and the debayered 3-channel
     * frame) -- confirm against callers. */
    int nWidth, nHeight, nBpp;

    /* Scratch buffer sized for a full-resolution 3-channel sensor frame. */
    unsigned char *ImgData = (unsigned char *)malloc((size_t)qhyusb->QCam.cameraW*qhyusb->QCam.cameraH*3*bpp/8);
    /* BUG FIX: the malloc result was used unchecked. */
    if (ImgData == NULL)
    {
        #ifdef QHYCCD_DEBUG
        printf("GetImageData:Malloc failed\n");
        #endif
        return;
    }
    #ifdef QHYCCD_DEBUG
    printf("GetImageData:Malloc memory Size %d\n",qhyusb->QCam.cameraW*qhyusb->QCam.cameraH*3*bpp/8);
    #endif

    GetFrame(ImgData, &nWidth, &nHeight, &nBpp, NULL, NULL, NULL, NULL);
    #ifdef QHYCCD_DEBUG
    printf("GetImageData:nWidth %d nHeight %d nBpp %d\n",nWidth,nHeight,nBpp);
    #endif

    memcpy(rawArray, ImgData, nWidth*nHeight*nBpp/8);

    if (bpp != nBpp)
    {
	if ((bpp == 8) && (nBpp == 16))
	{
	    /* 16 -> 8 bit: keep the high byte of each little-endian sample. */
	    int i = 1;
	    int j = 0;
	    while (j < w*h)
	    {
                rawArray[j] = rawArray[i];
                j += 1;
		i += 2;
	    }
	}
	else if ((bpp == 16) && (nBpp == 8))
	{
	    /* 8 -> 16 bit: place each sample in the high byte, zero low byte. */
	    int i = 1;
	    int j = 0;

	    unsigned char *tempArray = (unsigned char *)malloc((size_t)w*h*2);
	    /* BUG FIX: this malloc was also unchecked. */
	    if (tempArray == NULL)
	    {
		free(ImgData);
		return;
	    }

	    memcpy(tempArray, rawArray, w*h);
	    while (j < w*h)
	    {
                tempArray[i] = rawArray[j];
		tempArray[i-1] = 0;
	        j += 1;
		i += 2;
	    }
	    memcpy(rawArray, tempArray, w*h*2);
	    free(tempArray);
	}
    }

    if (channels == 3)
    {
	/* Debayer in place: wrap rawArray in an IplImage header.
	 * NOTE(review): reassigning imageData before cvReleaseImage relies
	 * on OpenCV freeing imageDataOrigin (the buffer cvCreateImage
	 * allocated), not the reassigned imageData pointer -- confirm; the
	 * cleaner pattern is cvCreateImageHeader + cvSetData. */
	IplImage *img = cvCreateImage(cvSize(nWidth,nHeight),bpp,1);
	img->imageData = (char *)rawArray;
        IplImage *colorimg = cvCreateImage(cvSize(nWidth,nHeight),bpp,channels);

	if (qhyusb->QCam.CAMERA == DEVICETYPE_QHY5LII)
	{
            cvCvtColor(img,colorimg,CV_BayerGR2RGB);
	}
	memcpy(rawArray,colorimg->imageData,colorimg->imageSize);
	cvReleaseImage(&img);
	cvReleaseImage(&colorimg);
    }

    /* Final nearest-neighbour resize from sensor size to the requested w x h. */
    IplImage *img = cvCreateImage(cvSize(nWidth,nHeight),bpp,channels);
    img->imageData = (char *)rawArray;

    IplImage *rszimg = cvCreateImage(cvSize(w,h),bpp,channels);
    cvResize(img,rszimg,CV_INTER_NN);
    memcpy(rawArray,rszimg->imageData,rszimg->imageSize);
    cvReleaseImage(&rszimg);
    cvReleaseImage(&img);
    free(ImgData);
}
Example #3
0
IplImage* detect_and_draw(IplImage* img, double scale = 1.3)
{
	/* Detects the first object (hand) in `img` with the global `cascade`,
	 * draws a box around it on `img`, sets the global HAND flag, and
	 * returns a newly allocated crop of the detection (caller frees).
	 * If nothing is found, returns a blank 100x100 image and HAND = 0. */
	IplImage* img1;
	/* (removed unused local `char *str`) */
	static CvScalar colors[] = {
		{{0,0,255}}, {{0,128,255}},{{0,255,255}},{{0,255,0}},
		{{255,128,0}},{{255,255,0}},{{255,0,0}}, {{255,0,255}}
	}; // Just some pretty colors to draw with

	// IMAGE PREPARATION: grayscale, downscale by `scale`, equalize.
	IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
	IplImage* small_img = cvCreateImage(
	cvSize( cvRound(img->width/scale), cvRound(img->height/scale)), 8, 1);
	cvCvtColor( img, gray, CV_BGR2GRAY );
	cvResize( gray, small_img, CV_INTER_LINEAR );
	cvEqualizeHist( small_img, small_img );

	// DETECT OBJECTS IF ANY
	cvClearMemStorage( storage );
	fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
	CvSeq* objects = cvHaarDetectObjects(
										small_img,
										cascade,
										storage,
										1.1,
										2,
										0 ,
										cvSize(35, 35)
										);

	// Use only the first detection (if any): crop it out and mark it.
	fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
	if( 0<(objects ? objects->total : 0))
	{
		/* NOTE(review): r is in small-image coordinates but is applied to
		 * the full-size `img` without rescaling by `scale` -- preserved
		 * as-is; confirm intent. */
		CvRect* r = (CvRect*)cvGetSeqElem( objects, 0 );

		cvSetImageROI(img,*r);

		img1=cvCreateImage(cvSize(r->width,r->height),img->depth,img->nChannels);

		cvCopy(img,img1,NULL);
		cvRectangle(
					img,
					cvPoint(r->x,r->y),
					cvPoint(r->x+r->width,r->y+r->height),
					colors[0]
		);
		cvResetImageROI(img);
		HAND=1;
	}
	else
	{
		HAND=0;
		img1=cvCreateImage(cvSize(100,100),img->depth,img->nChannels);
	}

	cvReleaseImage( &gray);
	cvReleaseImage( &small_img );
	return img1;
}
void *process2(void *arg)
{
	do

	{
			frame2 = cvQueryFrame(capture2); // pointer to a cvCapture structure

			if(!frame2)
			  break;

			cvShowImage("video2", frame2);
			roi2Image=cvCloneImage(frame2);

			if ((orig2.x != dest2.x) && (orig2.y != dest2.y))
			{	cvSetImageROI(roi2Image, cvRect(orig2.x<dest2.x?orig2.x:dest2.x, orig2.y<dest2.y?orig2.y:dest2.y,
						abs(dest2.x-orig2.x),abs(dest2.y-orig2.y)));
			roi2Adj = cvCreateImage(cvSize(abs(dest2.x-orig2.x)*input_resize_percent/100,
												abs(dest2.y-orig2.y)*input_resize_percent/100),
												roi2Image->depth, roi2Image->nChannels);
			}
			else
			{
				cvSetImageROI(roi2Image,cvRect(0,0,frame2->width,frame2->height));
			roi2Adj = cvCreateImage(cvSize((int)((frame2->width*input_resize_percent)/100) , (int)((frame2->height*input_resize_percent)/100)),
					frame2->depth, frame2->nChannels);
			}
			cvResize(roi2Image, roi2Adj, CV_INTER_LINEAR);

	pthread_mutex_lock(&lock);
					if (!changing && !analiza1)
					{
						Ncarros=detect(roi2Adj);
						printf("Proceso 2 Numero de Carros: %d \n", Ncarros);
						if (Ncarros>= 4)
							{
							reset=true;
							pthread_create(&cambioSem,NULL,GreentoRed2,semaphore2);
							analiza2=false;
							analiza1=true;
							}
				}

				if (timeout && !analiza1)
					{
						timeout=false;
						pthread_create(&cambioSem,NULL,GreentoRed2,semaphore2);
						analiza2=false;
						analiza1=true;
					}


		pthread_mutex_unlock(&lock);
		cvShowImage("image2", roi2Adj);


			key = cvWaitKey(2);

			if(key == KEY_ESC)
			  break;
			usleep(10000);

	}while(1);


			 cvDestroyAllWindows();
			  pthread_cancel(proc1);
			  pthread_cancel(timeOuts);
			  pthread_cancel(cambioSem);
			  cvReleaseImage(&frame1);
			  cvReleaseImage(&roi1Image);
			  cvReleaseImage(&roi1Adj);
			  cvReleaseImage(&semaphore1);
			  cvReleaseCapture(&capture1);
			  cvReleaseImage(&frame2);
			  cvReleaseImage(&roi2Image);
			  cvReleaseImage(&roi2Adj);
			  cvReleaseImage(&semaphore2);
			  cvReleaseCapture(&capture2);
			  cvReleaseHaarClassifierCascade(&cascade);
			  cvReleaseMemStorage(&storage);
			  finish=true;
			  return NULL;
}
Example #5
0
int main(int argc, char* argv[]){
	/* Fixation-based segmentation driver: loads an image (-i), optionally
	 * resizes it to <= maxWidth, computes or loads a probabilistic
	 * boundary, segments around fixation points (-pos / -f), and saves the
	 * regions (-o).  Prints per-stage timings before exiting. */
	CCmdLine cmdLine;

	cmdLine.SplitLine(argc, argv);

	if ( !(cmdLine.HasSwitch("-i") && cmdLine.HasSwitch("-o") && (cmdLine.HasSwitch("-pos") || cmdLine.HasSwitch("-f") || cmdLine.HasSwitch("-pbOnly"))) ){
		fprintf(stderr, "usage: %s -i <image> -o <output-directory> < -pos <x> <y> | -f <fixation-points-file> > [ -pb <probabilistic-boundary-prefix ] [ -flow <optical-flow-file> ] [ -sobel ]\n",argv[0]);
		fprintf(stderr, "OR \t %s -pbOnly -i <image> -o <output-probabilistic-boundary-prefix>\n",argv[0]);
		exit(1);
	}
	class segLayer frame1;
	char tmp[80];
	/* BUG FIX: the original used strcpy() into this fixed 80-byte buffer
	 * for every command-line path; a long path overflowed the stack.
	 * snprintf truncates safely and always NUL-terminates. */
	snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-i", 0).c_str());

	int64 tic1,tic2,tic3,tic4;
	double ticFrequency = cvGetTickFrequency()*1000000;
	tic1=cvGetTickCount();

	IplImage *im=cvLoadImage(tmp), *im2;
	#ifdef CUDA_SUPPORT
	int maxWidth=640;
	#else
	int maxWidth=640;
	#endif
	bool resized=false;
	float scale=1;
	if(cvGetSize(im).width>maxWidth){
		/* Downscale so the segmentation works on a bounded-size image;
		 * `scale` also maps fixation coordinates into the resized frame. */
		scale=maxWidth/(double)(cvGetSize(im).width);
		printf("Image too big, resizing it for the segmentation...\n");
	    	int newHeight=(int)(cvGetSize(im).height*scale);
	    	im2=cvCreateImage( cvSize(maxWidth,newHeight), IPL_DEPTH_8U, 3 );
	    	cvResize(im,im2);
		resized=true;
	}else{
		im2=im;
	}
  	frame1.setImage(im2);
	/* NOTE(review): im/im2 are never released; frame1 may hold a reference
	 * to im2, so releasing here is not obviously safe -- leak preserved,
	 * confirm ownership with segLayer::setImage. */

	if (cmdLine.HasSwitch("-pb")){
		/* Precomputed boundary supplied: skip edge detection entirely. */
		snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-pb", 0).c_str());
		frame1.readPbBoundary(tmp);
	}else{

		// Edge detection!
		if (cmdLine.HasSwitch("-sobel"))
	  		frame1.edgeSobel();
		else{
	#ifdef CUDA_SUPPORT
			if(!get_lock()){
				fprintf(stderr,"Impossible to get the lock...\n");
				exit(1);
			}
			frame1.edgeGPU(false);
			if(!release_lock()){
				fprintf(stderr,"Impossible to release the lock...\n");
				exit(1);
			}
	#else
			frame1.edgeCGTG();
	#endif
		}

		tic2=cvGetTickCount();

		if (cmdLine.HasSwitch("-flow")){
			/* Optional optical flow: split the encoded flow image into U/V
			 * float planes (scaled from [0,255] to roughly [-20,20]). */
			snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-flow", 0).c_str());
			IplImage *flow=cvLoadImage(tmp);
			IplImage *flow32 = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,3);
			IplImage *flowU = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
	  		IplImage *flowV = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
			cvConvertScale(flow, flow32, 40/255.,-20);
			cvSplit(flow32,flowU,NULL,NULL,NULL);
			cvSplit(flow32,NULL,flowV,NULL,NULL);
			frame1.setU(flowU);
			frame1.setV(flowV);
			cvReleaseImage(&flow);
			cvReleaseImage(&flow32);
		}

		frame1.generatePbBoundary();
	}

	if (cmdLine.HasSwitch("-pbOnly")){
		snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-o", 0).c_str());
		frame1.savePbBoundary(tmp);
	}else{
		frame1.allocateMemForContours();// Don't forget to allocate memory to store the region contours.
		//select fixation point!
		if(cmdLine.HasSwitch("-pos")){
			float x,y;
			sscanf(cmdLine.GetArgument("-pos", 0).c_str(),"%f",&x);
			sscanf(cmdLine.GetArgument("-pos", 1).c_str(),"%f",&y);
			frame1.assignFixPt((int)(x*scale), (int)(y*scale));
		}else{
			snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-f", 0).c_str());
			frame1.readFixPts(tmp,scale);
		}
		//segment
		frame1.segmentAllFixs();

		tic3=cvGetTickCount();

		//save results (resized back to the original dimensions if needed)
		snprintf(tmp, sizeof tmp, "%s", cmdLine.GetArgument("-o", 0).c_str());
		if(resized)
			frame1.saveResizedRegions(tmp,cvGetSize(im).width,cvGetSize(im).height);
		else
			frame1.saveRegions(tmp);
		//release memory!
		frame1.deallocateMemForContours();
	}

	tic4=cvGetTickCount();
	printf("\n\nTotal time = %f\n",(tic4-tic1)/ticFrequency);
	if(!cmdLine.HasSwitch("-pb"))
		printf("\t edges detection = %f\n",(tic2-tic1)/ticFrequency);
	if(!cmdLine.HasSwitch("-pbOnly"))
		printf("\t segmentation = %f\n",(tic3-tic2)/ticFrequency);
  	return 0;
}
Example #6
0
/* main code */
int main(int argc, char** argv ){
	CvCapture   *capture;
	int i, key;
	//*struct tm *newtime; 
	time_t second,milsecond;
	char rightfilename[100], leftfilename[100];

	// ブーストされた分類器のカスケードを読み込む
//	cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
	righteye_cascade = (CvHaarClassifierCascade *) cvLoad (righteye_cascade_name, 0, 0, 0);
	lefteye_cascade = (CvHaarClassifierCascade *) cvLoad (lefteye_cascade_name, 0, 0, 0);

	/* initialize camera */
	capture = cvCaptureFromCAM( 0 );

	/* always check */
	if( !capture ) return 1;

	/* get video properties, needed by template image */
	frame = cvQueryFrame( capture );
	if ( !frame ) return 1;
    
	/* create template image */
	tpl = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ), 
                         frame->depth, frame->nChannels );
    
	/* create image for template matching result */
	tm = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	//eyezone
	eyezone1 = cvCreateImage(cvSize(50,50), IPL_DEPTH_8U, 1);
	minieyezone1 = cvCreateImage(cvSize(16,16), IPL_DEPTH_8U, 1);
	output1 = cvCreateImage(cvSize(512,512), IPL_DEPTH_8U, 1);
   	eyezone2 = cvCreateImage(cvSize(50,50), IPL_DEPTH_8U, 1);
	minieyezone2 = cvCreateImage(cvSize(16,16), IPL_DEPTH_8U, 1);
	output2 = cvCreateImage(cvSize(512,512), IPL_DEPTH_8U, 1);

	/* create a window and install mouse handler */
	cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
	cvSetMouseCallback( "video", mouseHandler, NULL );
	cvNamedWindow("output1", CV_WINDOW_AUTOSIZE); 
	cvNamedWindow("output2", CV_WINDOW_AUTOSIZE);

	gray = cvCreateImage (cvGetSize (frame), IPL_DEPTH_8U, 1);
	righteye_storage = cvCreateMemStorage (0);
	lefteye_storage = cvCreateMemStorage (0);
	CvPoint righteye_center, lefteye_center;
    
	// eye candidate
	CvRect righteye_cand1, righteye_cand2, lefteye_cand1, lefteye_cand2, right, left;
	int eye_candidate_num = 0;	



	while( key != 'q' ) {
		eye_candidate_num = 0;
		/* get a frame */
		frame = cvQueryFrame( capture );

		/* always check */
		if( !frame ) break;

		/* 'fix' frame */
		/*   cvFlip( frame, frame, -1 ); */
		frame->origin = 0;
        
		/* perform tracking if template is available */
		if( is_tracking ) trackObject();
        

		cvClearMemStorage (righteye_storage);
		cvClearMemStorage (lefteye_storage);
		cvCvtColor (frame, gray, CV_BGR2GRAY);
		cvEqualizeHist (gray, gray);
		righteye = cvHaarDetectObjects (gray, righteye_cascade, righteye_storage, 1.11, 4, 0, cvSize (40, 40), cvSize(40,40));
		lefteye = cvHaarDetectObjects (gray, lefteye_cascade, lefteye_storage, 1.11, 4, 0, cvSize (40, 40), cvSize(40,40));


		//右目を円で描画
		for (i = 0; i < (righteye ? righteye->total : 0); i++) {
			CvRect *r = (CvRect *) cvGetSeqElem (righteye, i);
			CvPoint center;
			int radius;
			center.x = cvRound (r->x + r->width * 0.5);
			center.y = cvRound (r->y + r->height * 0.5);
			radius = cvRound ((r->width + r->height) * 0.25);
			cvCircle (frame, center, radius, colors[i % 8], 3, 8, 0);
		//右目候補
			if(i == 0){
				righteye_cand1 = *r;
				}
			if(i == 1){
				righteye_cand2 = *r;
				}
			}
		//左目を死角で描画
		for (i = 0; i < (lefteye ? lefteye->total : 0); i++) {
			CvRect *r = (CvRect *) cvGetSeqElem (lefteye, i);
			CvPoint apex1, apex2;
			apex1 = cvPoint(r->x, r->y);
			apex2.x = cvRound(r->x + r->width);
			apex2.y = cvRound(r->y + r->height);
			cvRectangle (frame,apex1, apex2, colors[i % 8], 3, 8, 0);
			
		//左目候補
			if(i == 0){
				lefteye_cand1 = *r;
				}
			if(i == 1){
				lefteye_cand2 = *r;
				}
			}
		//候補しぼり
			if(righteye->total >= 1){
				if(righteye->total >= 2){
					if(righteye_cand1.x <= righteye_cand2.x){
						right = righteye_cand1;
						righteye_center.x = cvRound(right.x + right.width*0.5);
						righteye_center.y = cvRound(right.y + right.height*0.5);
						}			
					else{
						right = righteye_cand2;
						righteye_center.x = cvRound(right.x + right.width*0.5);
						righteye_center.y = cvRound(right.y + right.height*0.5);
						}
					}
				else{
					right = righteye_cand1;
					righteye_center.x = cvRound(right.x + right.width*0.5);
					righteye_center.y = cvRound(right.y + right.height*0.5);
					}
				eyezone1 = cvCreateImage(cvSize(right.width, right.height), IPL_DEPTH_8U, 1);
				cvGetRectSubPix(gray, eyezone1, cvPointTo32f(righteye_center));
				cvEqualizeHist(eyezone1, eyezone1);
				cvResize(eyezone1, minieyezone1, CV_INTER_LINEAR);
				cvResize(minieyezone1, output1, CV_INTER_NN);
			}



			if(lefteye->total >= 1){
				if(lefteye->total >= 2){
					if(lefteye_cand1.x >= lefteye_cand2.x){
						left = lefteye_cand1;
						lefteye_center.x = cvRound(left.x + left.width*0.5);
						lefteye_center.y = cvRound(left.y + left.height*0.5);
						}			
					else{
						left = lefteye_cand2;
						lefteye_center.x = cvRound(left.x + left.width*0.5);
						lefteye_center.y = cvRound(right.y + left.height*0.5);
						}
					}
				else{
					left = lefteye_cand1;
					lefteye_center.x = cvRound(left.x + left.width*0.5);
					lefteye_center.y = cvRound(left.y + left.height*0.5);
					}
				eyezone2 = cvCreateImage(cvSize(left.width, left.height), IPL_DEPTH_8U, 1);
				cvGetRectSubPix(gray, eyezone2, cvPointTo32f(lefteye_center));
				cvEqualizeHist(eyezone2, eyezone2);
				cvResize(eyezone2, minieyezone2, CV_INTER_LINEAR);
				cvResize(minieyezone2, output2, CV_INTER_NN);
			}
			printf("righteye width = %d, height = %d\n", right.width, right.height); 
			printf("lefteye width = %d, height = %d\n", left.width, left.height);
	//		printf("righteye x = %d\n", right.x);
	//		printf("lefteye x = %d\n", left.x);





		/* display frame */
		cvShowImage( "video", frame);
		//cvShowImage( "eyezone1", eyezone1);
		//cvShowImage( "eyezone2", eyezone2);
		cvShowImage( "output1", output1);
		cvShowImage( "output2", output2);

		//ファイル出力,時間計測
		time(&second);
		milsecond = clock();
	//	printf("時間[sec] = %ld\n", second);
		printf("経過時間[usec] = %ld\n", milsecond);
		//sprintf(filename, "%ld.bmp",second);
		//printf("sprintf = %s\n", filename);
		//cvSaveImage(filename, frame,0); 
	   	if(key == 'n'){
			sprintf(rightfilename, "n_right%ld.bmp", milsecond);
			sprintf(leftfilename, "n_left%ld.bmp", milsecond);
	      		printf("fileoutput %s, %s\n", rightfilename, leftfilename);
	  		cvSaveImage(rightfilename, minieyezone1, 0); 		
	  		cvSaveImage(leftfilename, minieyezone2, 0); 		
		}
	   	if(key == 'h'){
			sprintf(rightfilename, "h_right%ld.bmp", milsecond);
			sprintf(leftfilename, "h_left%ld.bmp", milsecond);
	      		printf("fileoutput %s, %s\n", rightfilename, leftfilename);
	  		cvSaveImage(rightfilename, minieyezone1, 0); 		
	  		cvSaveImage(leftfilename, minieyezone2, 0); 		
		}
	   	if(key == 'j'){
			sprintf(rightfilename, "j_right%ld.bmp", milsecond);
			sprintf(leftfilename, "j_left%ld.bmp", milsecond);
	      		printf("fileoutput %s, %s\n", rightfilename, leftfilename);
	  		cvSaveImage(rightfilename, minieyezone1, 0); 		
	  		cvSaveImage(leftfilename, minieyezone2, 0); 		
		}
	   	if(key == 'k'){
			sprintf(rightfilename, "k_right%ld.bmp", milsecond);
			sprintf(leftfilename, "k_left%ld.bmp", milsecond);
	      		printf("fileoutput %s, %s\n", rightfilename, leftfilename);
	  		cvSaveImage(rightfilename, minieyezone1, 0); 		
	  		cvSaveImage(leftfilename, minieyezone2, 0); 		
		}
	   	if(key == 'l'){
			sprintf(rightfilename, "l_right%ld.bmp", milsecond);
			sprintf(leftfilename, "l_left%ld.bmp", milsecond);
	      		printf("fileoutput %s, %s\n", rightfilename, leftfilename);
	  		cvSaveImage(rightfilename, minieyezone1, 0); 		
	  		cvSaveImage(leftfilename, minieyezone2, 0); 		
		}








		
		/* exit if user press 'q' */
		key = cvWaitKey( 1 );
		}

	/* free memory */
	cvDestroyWindow( "video" );
	cvDestroyWindow( "output1");
	cvDestroyWindow( "output2");
	cvReleaseCapture( &capture );
	cvReleaseImage( &tpl );
	cvReleaseImage( &tm );
	cvReleaseImage( &gray);
   	cvReleaseImage( &eyezone1);
	cvReleaseImage( &eyezone2);
	cvReleaseImage( &minieyezone1);
	cvReleaseImage( &minieyezone2);
	cvReleaseImage( &output1);
	cvReleaseImage( &output2);
	return 0;
	}
void getHoGOpenMP(IplImage* src, double* feat) {
	/* Computes a HoG descriptor of `src` into `feat`
	 * (BLOCK_WIDTH*BLOCK_HEIGHT*BLOCK_DIM doubles): resize to
	 * RESIZE_X x RESIZE_Y, accumulate per-cell gradient-orientation
	 * histograms, then L2-normalize overlapping blocks. */
	IplImage* img = cvCreateImage(cvSize(RESIZE_X,RESIZE_Y), IPL_DEPTH_8U, 1);
	cvResize(src, img);

	const int width = RESIZE_X;
	const int height = RESIZE_Y;

	double hist[CELL_WIDTH][CELL_HEIGHT][CELL_BIN];
	memset(hist, 0, CELL_WIDTH*CELL_HEIGHT*CELL_BIN*sizeof(double));

	/* BUG FIX: this loop previously ran under `#pragma omp parallel for`,
	 * but pixels handled by different threads map into the same hist cell,
	 * so the unsynchronized `+=` below was a data race that silently lost
	 * gradient mass.  Run it serially; parallelizing correctly would need
	 * per-thread histograms or atomics. */
	for(int y=0; y<height; y++){
		for(int x=0; x<width; x++){
			if(x==0 || y==0 || x==width-1 || y==height-1){
				continue;	/* central difference needs both neighbours */
			}
			/* BUG FIX: imageData is (signed) char, so pixel values >= 128
			 * were read as negative and produced wrong gradients; read
			 * through unsigned char. */
			double dx = (double)(unsigned char)img->imageData[y*img->widthStep+(x+1)]
			          - (double)(unsigned char)img->imageData[y*img->widthStep+(x-1)];
			double dy = (double)(unsigned char)img->imageData[(y+1)*img->widthStep+x]
			          - (double)(unsigned char)img->imageData[(y-1)*img->widthStep+x];
			double m = sqrt(dx*dx+dy*dy);
			double deg = (atan2(dy, dx)+CV_PI) * 180.0 / CV_PI;	/* 0..360 */
			int bin = CELL_BIN * deg/360.0;
			if(bin < 0) bin=0;
			if(bin >= CELL_BIN) bin = CELL_BIN-1;
			hist[(int)(x/CELL_X)][(int)(y/CELL_Y)][bin] += m;
		}
	}

	/* Block normalization.  This parallel for is safe: each (x,y)
	 * iteration only reads hist and writes a disjoint slice of feat, and
	 * vec/norm are declared inside the loop (thread-private). */
#pragma omp parallel for
	for(int y=0; y<BLOCK_HEIGHT; y++){
		for(int x=0; x<BLOCK_WIDTH; x++){
			double vec[BLOCK_DIM];
			memset(vec, 0, BLOCK_DIM*sizeof(double));
			/* Gather the block's cell histograms into one vector. */
			for(int j=0; j<BLOCK_Y; j++){
				for(int i=0; i<BLOCK_X; i++){
					for(int d=0; d<CELL_BIN; d++){
						int index = j*(BLOCK_X*CELL_BIN) + i*CELL_BIN + d;
						vec[index] = hist[x+i][y+j][d];
					}
				}
			}

			/* L2 norm with +1 regularizer. */
			double norm = 0.0;
			for(int i=0; i<BLOCK_DIM; i++){
				norm += vec[i]*vec[i];
			}
			for(int i=0; i<BLOCK_DIM; i++){
				vec[i] /= sqrt(norm + 1.0);
			}

			for(int i=0; i<BLOCK_DIM; i++){
				int index = y*BLOCK_WIDTH*BLOCK_DIM + x*BLOCK_DIM + i;
				feat[index] = vec[i];
			}
		}
	}
	cvReleaseImage(&img);
	return;
}
Example #8
0
void ASEF::ComputeEyeLocations(IplImage *img, CvRect bb){
    /* Locates the left and right eye centres inside the face box `bb` of
     * grayscale image `img` using precomputed ASEF correlation filters
     * (this->LeftEyeDetector / RightEyeDetector, frequency domain).
     * Results are written to this->pEyeL / this->pEyeR in `img`
     * coordinates.  Statement order matters throughout: each cv* call
     * feeds the member buffer consumed by the next. */

    assert(img->nChannels==1);

    /* Crop the face box and resample to the filter size (nCols x nRows). */
    IplImage *roi = cvCreateImage(cvSize(this->nCols, this->nRows), img->depth, 1);

    cvSetImageROI(img, bb);
    cvResize(img, roi, CV_INTER_LINEAR);
    cvResetImageROI(img);

    /* Convert to [0,1] doubles and apply retina-style illumination
     * normalization into this->face_real. */
    IplImage *roi_64 = cvCreateImage(cvGetSize(roi), IPL_DEPTH_64F, 1);

    cvConvertScale(roi, roi_64, 1./255., 0.);

    FaceNormIllu::do_NormIlluRETINA(roi_64, this->face_real, 5.0);

    /* Pack real face data with a zero imaginary plane (face_im). */
    cvMerge(this->face_real, this->face_im, 0, 0, this->complex_data);

    // do DFT
    cvDFT(this->complex_data, this->F, CV_DXT_FORWARD, this->complex_data->height);

    /* Correlate with each eye filter in the frequency domain, then
     * inverse-transform to get the spatial response maps gl / gr. */

    // G left
    cvMulSpectrums(this->F, this->LeftEyeDetector, this->Gl, CV_DXT_ROWS);

    cvDFT(this->Gl, this->Gl, CV_DXT_INV_SCALE, this->Gl->height);
    cvSplit(this->Gl, this->gl, 0, 0, 0);

    // G right
    cvMulSpectrums(this->F, this->RightEyeDetector, this->Gr, CV_DXT_ROWS);

    /* NOTE(review): nonzero_rows here is Gl->height, not Gr->height --
     * harmless if Gl and Gr are the same size, but looks like a
     * copy-paste; confirm. */
    cvDFT(this->Gr, this->Gr, CV_DXT_INV_SCALE, this->Gl->height);
    cvSplit(this->Gr, this->gr,0,0,0);

    /* Normalize each response to [0,1], sum them, then re-mask the sum to
     * the expected left/right eye regions. */

    // add both responses
    double minV, maxV;
    cvMinMaxLoc(this->gl, &minV, &maxV);
    cvConvertScale(this->gl, this->gl, 1./(maxV-minV), -minV/(maxV-minV));

    cvMinMaxLoc(this->gr, &minV, &maxV);
    cvConvertScale(this->gr, this->gr, 1./(maxV-minV), -minV/(maxV-minV));

    cvAdd(this->gl, this->gr, this->g);

    cvMul(this->g, this->LeftEyeMask, this->gl);

    cvMul(this->g, this->RightEyeMask, this->gr);


    ///////////////////////////////////////////////////
    //  Compute Eye Locations
    ///////////////////////////////////////////////////
    float scale;

    /* Left eye: strongest response in the top-left quadrant, mapped back
     * to image coordinates. */
    cvSetImageROI(this->gl, cvRect(0,0, this->nCols>>1, this->nRows>>1));
    cvMinMaxLoc(this->gl, 0,0,0, &this->pEyeL);
    cvResetImageROI(this->gl);

    scale = (float)bb.width/(float)this->nCols;
    this->pEyeL.x=cvRound((float)this->pEyeL.x * scale + bb.x);
    this->pEyeL.y=cvRound((float)this->pEyeL.y * scale + bb.y);

    /* Right eye: strongest response in the top-right quadrant.
     * NOTE(review): the left eye uses the width-based scale for both axes
     * while the right eye uses the height-based scale -- asymmetric;
     * preserved as-is, confirm intent. */
    cvSetImageROI(this->gr, cvRect(this->nCols>>1, 0, this->nCols>>1, this->nRows>>1));
    cvMinMaxLoc(this->gr, 0,0,0, &this->pEyeR);
    cvResetImageROI(this->gr);

    scale = (float)bb.height/(float)this->nRows;
    this->pEyeR.x=cvRound((float)(this->pEyeR.x + this->nCols*0.5)* scale + bb.x);
    this->pEyeR.y=cvRound((float)this->pEyeR.y * scale + bb.y);


    cvReleaseImage(&roi);
    cvReleaseImage(&roi_64);
}
void faceDetector::runFaceDetector(IplImage *input)
{
    /* Detects the largest face in `input` and stores its corners/size in
     * the member faceInformation (zeroed when nothing is found or input
     * is NULL). */
    double t = (double)cvGetTickCount();

    static tracker faceTracker;
    static CvPoint fp1,fp2;
    faceInformation.LT= cvPoint(0,0);
    faceInformation.RB= cvPoint(0,0);
    faceInformation.Width=0;
    faceInformation.Height=0;
    if (input==0)
        return;

    IplImage *gray, *small_img;
    int i;
    int scale=1;

    gray = cvCreateImage( cvSize(input->width,input->height), 8, 1 );
    small_img = cvCreateImage( cvSize( cvRound (input->width/scale),
                                       cvRound (input->height/scale)), 8, 1 );

    cvCvtColor( input, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvClearMemStorage( storage );

    if ( cascade )
    {

        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.4, 2, 0
                                            |CV_HAAR_DO_CANNY_PRUNING
                                            ,
                                            cvSize(80/scale, 80/scale) );
        int maxI=-1;
        int max0=0;

        /* Pick the largest detection. */
        for ( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            /* BUG FIX: the original indexed with cvGetSeqElem(faces, maxI),
             * i.e. with the running best index (-1 on the first pass),
             * instead of the loop variable i. */
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            /* BUG FIX: the original `if (...)` ended in a stray ';', so
             * the body ran unconditionally and maxI always ended up as the
             * last index rather than the largest face. */
            if (max0 < (r->width*r->height))
            {
                max0 = (r->width*r->height);
                maxI = i;
            }
        }


        if (maxI!=-1)
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, maxI);
            faceInformation.LT.x=(r->x)*scale;
            faceInformation.LT.y=(r->y)*scale;
            faceInformation.RB.x=(r->x+ r->width)*scale;
            faceInformation.RB.y=(r->y+ r->height)*scale;
            faceInformation.Width=(r->width)*scale;
            faceInformation.Height=(r->height)*scale;
            /* NOTE(review): `in` is never used or released after the
             * tracker call below was commented out -- likely a leak;
             * preserved as-is, confirm clipDetectedFace ownership. */
            IplImage *in=clipDetectedFace(input);
            //faceTracker.setModel(in);
            fp1=faceInformation.LT;
            fp2=faceInformation.RB;
        }
    }

    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
    double t1 = (double)cvGetTickCount();

//printf( "detection time = %gms\n",(t1-t)/((double)cvGetTickFrequency()*1000.));

}
Example #10
0
void process_image(IplImage* frame, int draw)
{
	int i, j;
	float t;

	uint8_t* pixels;
	int nrows, ncols, ldim;

	#define MAXNDETECTIONS 2048
	int ndetections;
	float rcsq[4*MAXNDETECTIONS];

	static IplImage* gray = 0;
	static IplImage* pyr[5] = {0, 0, 0, 0, 0};

	/*
		...
	*/

	//
	if(!pyr[0])
	{
		//
		gray = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);

		//
		pyr[0] = gray;
		pyr[1] = cvCreateImage(cvSize(frame->width/2, frame->height/2), frame->depth, 1);
		pyr[2] = cvCreateImage(cvSize(frame->width/4, frame->height/4), frame->depth, 1);
		pyr[3] = cvCreateImage(cvSize(frame->width/8, frame->height/8), frame->depth, 1);
		pyr[4] = cvCreateImage(cvSize(frame->width/16, frame->height/16), frame->depth, 1);
	}

	// get grayscale image
	if(frame->nChannels == 3)
		cvCvtColor(frame, gray, CV_RGB2GRAY);
	else
		cvCopy(frame, gray, 0);

	// perform detection with the pico library
	t = getticks();

	if(usepyr)
	{
		int nd;

		//
		pyr[0] = gray;

		pixels = (uint8_t*)pyr[0]->imageData;
		nrows = pyr[0]->height;
		ncols = pyr[0]->width;
		ldim = pyr[0]->widthStep;

		ndetections = find_objects(rcsq, MAXNDETECTIONS, cascade, angle, pixels, nrows, ncols, ldim, scalefactor, stridefactor, MAX(16, minsize), MIN(128, maxsize));

		for(i=1; i<5; ++i)
		{
			cvResize(pyr[i-1], pyr[i], CV_INTER_LINEAR);

			pixels = (uint8_t*)pyr[i]->imageData;
			nrows = pyr[i]->height;
			ncols = pyr[i]->width;
			ldim = pyr[i]->widthStep;

			nd = find_objects(&rcsq[4*ndetections], MAXNDETECTIONS-ndetections, cascade, angle, pixels, nrows, ncols, ldim, scalefactor, stridefactor, MAX(64, minsize>>i), MIN(128, maxsize>>i));

			for(j=ndetections; j<ndetections+nd; ++j)
			{
				rcsq[4*j+0] = (1<<i)*rcsq[4*j+0];
				rcsq[4*j+1] = (1<<i)*rcsq[4*j+1];
				rcsq[4*j+2] = (1<<i)*rcsq[4*j+2];
			}

			ndetections = ndetections + nd;
		}
	}
	else
	{
Example #11
0
// Process one laser scan: smooth out absorption spikes, extract vertical
// edges, merge edges that are too close, match edge pairs into U/P/O leg
// patterns, and publish the resulting targets (distance/bearing/pattern)
// into _target / _howMany. Optionally renders a debug view.
//
// NOTE(review): assumes laserBuffer has at least _bufferLength entries and
// that _bufferLength <= BUFFERLENGTH — TODO confirm at the call site.
void LegsDetector::update(const std::vector< laser_t >& laserBuffer)
{
   // first remove high peaks due to absorving materials
   // Each reading is replaced by the minimum range over a +/-_delta window;
   // x/y are the Cartesian coordinates of that minimum at the ray's angle.
   laser_t laser[BUFFERLENGTH];
   for (int i = 0; i < _bufferLength; i++)
   {
      laser[i].range = DBL_MAX;
      double angle = laser[i].angle = laserBuffer[i].angle;
	  for (int k = max(0, i-_delta); k <= min( _bufferLength-1, i+_delta); k++)
      {
         double range;
         if (laserBuffer[k].range < laser[i].range)
         {
            range = laser[i].range = laserBuffer[k].range;
            laser[i].x = range * cos(angle);
            laser[i].y = range * sin(angle);
         }
      }
   }
   //                       (0)
   //                        |
   //                        |
   //                        |
   // (+90)------------------|-------------------(-90)
   // reading from right to left i.e. from -90 to +90
   //
   // start extracting all the vertical edges of interest
   // remembering the scan goes from right (-PI/2) to left (+PI/2)
   // left and right edges correspond to the robot's point of view
   //
   //                 -(p1)             (p1)-
   //                   |    (p1)-(p1)   |
   //                   |     |    |     |
   //                   |     |   l|     |r
   //                   |     |    |     |
   //                  L|     |R  (p2)--(p2)
   //                   |     |
   //                   |     |
   //                  (p2)--(p2)
   //
   // Edge type encoding: 'R'/'L' = long edge, 'r'/'l' = short edge,
   // classified by the range discontinuity between consecutive readings.
   vector< edge_t<point_t> > vEdge;
   double prevRange = laser[0].range;
   for (int id = 1; id < _bufferLength; id++)
   {
      double range = laser[id].range;

      //if ( range == MAXIMUM_RANGE  || prevRange == MAXIMUM_RANGE ) ;
	  if ((prevRange - range) > MIN_LONG_EDGE)      // possible left long edge
      {
		  edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'R'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_LONG_EDGE) // possible right long edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'L'};
         vEdge.push_back(e);
      }
      else if ((prevRange - range) > MIN_SHORT_EDGE) // possible left short edge
      {
         edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'r'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_SHORT_EDGE) // possible right short edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'l'};
         vEdge.push_back(e);
      }

      prevRange = range;
   }
   // remove edges too close to each other
   // Adjacent same-side edges closer than MIN_EDGE_DIST are merged into one
   // long edge; erase() keeps `second` valid for the next comparison.
   if ( vEdge.empty() ) return;
   vector<edge_t<point_t> >::iterator first = vEdge.begin();
   vector<edge_t<point_t> >::iterator second = first + 1;
   double d1, d2;
   char t1, t2;
   while (second < vEdge.end())
   {
	   t1 = toupper(first->type);
       t2 = toupper(second->type);
	   d1 = getDistance(second->p1, first->p2);
	   d2 = getDistance(first->p1, second->p2);
       if ( t1 == 'R' && t2 == 'R' && d1 < MIN_EDGE_DIST )
       {
		   first->p2 = second->p2;
           first->type = 'R';
           second = vEdge.erase(second);
        }
        else if ( t1 == 'L' && t2 == 'L' && d2 < MIN_EDGE_DIST )
        {
			first->p1 = second->p1;
            first->type = 'L';
            second = vEdge.erase(second);
	   }
       else
       {
		   first++;
           second++;
       }
   }
   if ( vEdge.empty() ) return;
   // draw some stuff for debugging... (must be done now, before vEdge is modified)
   if (_debug)
   {
      CvPoint start;
	  cvSet(_tmpImg, cvScalar(255,255,255));

	  // concentric range rings centered on the sensor position
	  start = cvPoint(DEBUG_WINDOW_WIDTH/2, 0);
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/80, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 2*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 3*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 4*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 5*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 6*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 7*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 8*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));

      start = cvPoint(METER2PIXEL(laser[0].y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(laser[0].x));
      // draw the laser data
      for (int i = 1; i < _bufferLength; i++)
      {
         CvPoint end = cvPoint(METER2PIXEL(laser[i].y) + DEBUG_WINDOW_WIDTH/2,
                               METER2PIXEL(laser[i].x));

		 // segments are drawn only when both endpoints are on the same side
		 // of MAXIMUM_RANGE, so range discontinuities show up as gaps
		 if (laser[i].range == MAXIMUM_RANGE && laser[i-1].range == MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));
		 if (laser[i].range <  MAXIMUM_RANGE && laser[i-1].range <  MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));

		 start = end;
      }
      // draw the extremes
      for (unsigned int i = 0; i < vEdge.size(); i++)
      {
         CvScalar color;
		 // after the merge pass every edge type is one of 'R','L','r','l',
		 // so color is always assigned before use below
		 switch (vEdge[i].type)
         {
            case 'R':
               color = cvScalar(0,0,255); // red
               break;
            case 'L':
               color = cvScalar(255,0,0); // blue
               break;
            case 'r':
               color = cvScalar(0,196,255);  // yellow
               break;
            case 'l':
               color = cvScalar(64,255,0);  // green
               break;
         }
		 // draw min extremes
		 CvPoint center = cvPoint(METER2PIXEL(vEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                                  METER2PIXEL(vEdge[i].p1.x));
         cvCircle(_tmpImg, center, 2, color);
         // draw max extremes
         CvPoint c1 = cvPoint(METER2PIXEL(vEdge[i].p2.y) - 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) - 3);
         CvPoint c2 = cvPoint(METER2PIXEL(vEdge[i].p2.y) + 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) + 3);
         cvRectangle(_tmpImg, c1, c2, color);
      }
   }

   // extract the horizontal lines of interest
   // Patterns are consumed greedily until none are found; _selectivity
   // gates the weaker P and O patterns (lower value = more permissive).
   vector< edge_t<point_t> > hEdge;
   int temp = 1;
   while ( temp > 0 ) { temp = getUpattern(vEdge, hEdge); }
   temp = 1;
   while ( _selectivity < 2 && temp > 0 ) { temp = getPpattern(vEdge, hEdge);}
   temp = 1;
   while ( _selectivity < 1 && temp > 0 ) { temp = getOpattern(vEdge, hEdge);}

   // finally calculate distance and direction of each horizontal line
   _target.clear();
   vector< edge_t<point_t> >::iterator itend = hEdge.end();
   for (vector< edge_t<point_t> >::iterator it = hEdge.begin(); it < itend; it++)
   {
      target_t t;
      // the distance is an average between the two points
      double xm = ((it->p1).x + (it->p2).x) / 2;
      double ym = ((it->p1).y + (it->p2).y) / 2;
      t.distance = sqrt(sqr(xm) + sqr(ym));
      // left PI/2, right -PI/2
      t.bearing = atan2(ym, xm);
      // no height information of course...
      t.pattern = it->type;
      _target.push_back(t);
   }
   // final number of detected people
   _howMany = _target.size();
   // draw the last things for debugging
   if (_debug)
   {
      // draw horizontal edges
      for (unsigned int i = 0; i < hEdge.size(); i++)
      {
         CvPoint p1 = cvPoint(METER2PIXEL(hEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p1.x));
         CvPoint p2 = cvPoint(METER2PIXEL(hEdge[i].p2.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p2.x));
//          cvLine(_tmpImg, p1, p2, cvScalar(0,128,255), 2);
         CvPoint pm = cvPoint((p1.x + p2.x) / 2, (p1.y + p2.y) / 2);
         int thick;
         // line thickness encodes pattern confidence: U > P > O
         if (hEdge[i].type == 'U')
            thick = 3;
         else if (hEdge[i].type == 'P')
            thick = 2;
         else
            thick = 1;
         cvLine(_tmpImg, cvPoint(DEBUG_WINDOW_WIDTH/2, 0), pm, cvScalar(0,128,255), thick);
      }

      cvFlip(_tmpImg, NULL, -1);
      cvResize(_tmpImg, _debugImage, CV_INTER_NN);
      cvShowImage("Legs detector", _debugImage);
 	  if (_delay)
        cvWaitKey(_delay);  // handles event processing of HIGHGUI library
   }
   return;
}
Example #12
0
 int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int moddiv=2,seq=0,seqdiv=2;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  unsigned char sendBuf;/*
  int serial;
  serial = openSerial("/dev/ttyACM0");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM1");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM2"); 
  if (serial == -1)
  serial = openSerial("/dev/ttyACM3");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM4");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM5");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM6"); 
  if (serial == -1)
  serial = openSerial("/dev/ttyACM7");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM8");	
if( serial == -1 ) {
return -1;
}*/
   //CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   CvCapture* capture = cvCaptureFromCAM( 1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
 // cvSetCaptureProperty(capture, CV_CAP_PROP_FPS,10);
//  cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,5);  
 // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
      
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)

      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(70, 180, 40),cvScalar(100, 230, 90),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
    if (seq==0) {
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

   //  tempwidth=cvGetSize(dilframebot).width;
   //  tempheight=cvGetSize(dilframebot).height;
   //  printf("dilframe: %d, %d \n",tempwidth,tempheight);
     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////////
    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////
   if(seq==seqdiv+2) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
     }
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("%d, %d, %d, %d\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     }
		
}
    seq++;
    seq=seq%(seqdiv+4);
     cvShowImage( "mywindow", frame); // show output image
     cvShowImage( "bot", threshframebot);
     cvShowImage( "top", threshframetop);

   //  cvShowImage("croped",cropped);
     //cvShowImage( "mywindow3", dilframeROI);
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;

 }
// Detect faces in pImgSrc, run landmark detection on each candidate, and
// keep the largest face whose landmarks were found. When a file name is
// supplied, an aligned 2x-size thumbnail of that face is written to disk.
//
// pImgSrc              source image (1, 3 or 4 channels); NULL -> false.
// nMin_FaceSize        minimum face width in pixels to accept.
// ThumbnailImgFilename optional output path for the aligned thumbnail.
// Returns true when at least one valid face was found.
//
// Fix: the original passed ThumbnailImgFilename (default NULL) straight to
// cvSaveImage, which crashes on a NULL file name. The thumbnail is now
// only produced when a file name is actually given.
bool CxFaceAnalyzer::Face_Detection(IplImage *pImgSrc,int nMin_FaceSize = 80, char *ThumbnailImgFilename=NULL)
{
	if(pImgSrc == NULL) return false;

	// (Re)build the grayscale working image for this call.
	if( m_pImgGray != NULL )
		cvReleaseImage( &m_pImgGray );

	m_pImgGray = cvCreateImage(cvGetSize(pImgSrc), IPL_DEPTH_8U, 1);

	// nChannels values are mutually exclusive, so an else-if chain suffices.
	if( pImgSrc->nChannels == 4 )
		cvCvtColor( pImgSrc, m_pImgGray, CV_BGRA2GRAY );
	else if( pImgSrc->nChannels == 3 )
		cvCvtColor( pImgSrc, m_pImgGray, CV_BGR2GRAY );
	else if( pImgSrc->nChannels == 1 )
		cvCopy( pImgSrc, m_pImgGray );

	// Restrict the detector to [nMin_FaceSize, half the image width].
	SetFaceSizeRange(m_nFaceDetectorNo, nMin_FaceSize, pImgSrc->width*0.5);

	// Run frontal-view detection and copy results into m_rects.
	FdRect FaceArea[MAX_FACES];
	m_face_num = FrontalView_ColorImage_FaceDetection(m_nFaceDetectorNo, m_pImgGray, FaceArea, 0);
	for(int i=0;i<m_face_num;i++)
	{
		m_rects[i].rc.x = FaceArea[i].x;
		m_rects[i].rc.y = FaceArea[i].y;
		m_rects[i].rc.width = FaceArea[i].width;
		m_rects[i].rc.height = FaceArea[i].height;
		m_rects[i].angle = FaceArea[i].view;
	}

	ClearFaceSizeRange(m_nFaceDetectorNo);

	// Validate each face with the landmark detector and remember the
	// largest one that passes.
	int nLargest_Face_Size = -1;
	int nLargest_ID = -1;
	bool    bLandmark;
	CvRect rect;
	CvPoint2D32f* landmark6 ;
	int    angle;

	for( int i=0; i < m_face_num; i++ )
	{
		m_cutface_flag[i] = 0;
		rect = m_rects[i].rc;
		angle = m_rects[i].angle;

		// Skip faces that fall outside the image or are too small.
		if(rect.x+rect.width  > m_pImgGray->width  || rect.x < 0) continue;
		if(rect.y+rect.height > m_pImgGray->height || rect.y < 0) continue;
		if(rect.width<nMin_FaceSize) continue;

		// Landmark detection; faces without landmarks are rejected.
		landmark6 = m_ldmks[i];
		bLandmark = m_plandmarkDetector->detect( m_pImgGray, &rect, landmark6, NULL, angle );
		if(bLandmark == false) continue;

		if(rect.width > nLargest_Face_Size)
		{
			nLargest_Face_Size = rect.width;
			nLargest_ID = i;
		}
	}

	if(nLargest_ID > -1)
	{
		// Align the winning face into the cut-face buffers.
		landmark6 = m_ldmks[nLargest_ID];
		rect = m_rects[nLargest_ID].rc;
		alignFace2(m_pImgGray, landmark6, &rect, m_cutface_big->width, m_cutface_big->height, false, m_age_sclxyud, m_cutface_big);

		cvResize(m_cutface_big, m_cutface_small);

		// Only write the thumbnail when a destination path was supplied
		// (the parameter defaults to NULL).
		if (ThumbnailImgFilename != NULL)
		{
			IplImage *lpTest = alignFace3(pImgSrc, landmark6, &rect, m_cutface_big->width * 2, m_cutface_big->height * 2, false, m_age_sclxyud, NULL);
			if (lpTest != NULL)
			{
				cvSaveImage(ThumbnailImgFilename, lpTest);
				cvReleaseImage(&lpTest);
			}
		}
	}

	cvReleaseImage(&m_pImgGray);
	m_pImgGray = NULL;
	return nLargest_ID > -1;
}
Example #14
0
// Sparse optical-flow demo: grab frames from a camera (no arguments) or a
// video file (argv[1]), track good features between consecutive frames with
// pyramidal Lucas-Kanade, and draw the flow vectors.
//
// Fixes vs. the original:
//  - the NULL check on cvQueryFrame's result now happens BEFORE the frame
//    is passed to cvResize (the original dereferenced a null frame at the
//    end of the video);
//  - the capture device is released and the window destroyed on exit
//    (both were leaked).
int main(int argc, char** argv) 
{
  // GLOBAL SETTINGS
  static int framecounter=0;
  const CvSize imsize = cvSize(320,240);
  int delay = 0;                 // cvWaitKey delay; 0 = pause on each frame
  
  const int win_size = 10;       // LK search window half-size
  // Scratch pyramid buffers reused across frames by cvCalcOpticalFlowPyrLK.
  CvSize pyr_sz = cvSize( imsize.width+8, imsize.height/3 );
  IplImage * pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * rawImage_resized = cvCreateImage( imsize, IPL_DEPTH_8U, 3);

  cvNamedWindow("Test");
  CvGenericTracker tracker;

  // LOAD INPUT FILE
  CvCapture * capture = NULL;
  if (argc==1) {
    capture = cvCreateCameraCapture(0);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
  }else{
    capture = cvCreateFileCapture(argv[1]);
  }
  if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");return 0;}
  cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);

  // START ENDLESS LOOP
  while(1)
  {
	// GET NEXT FRAME
    if (1){
      cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
    }else{
      framecounter++;
    }
    IplImage * rawImage = cvQueryFrame(capture);
    // Check for end-of-stream BEFORE touching the frame (bug fix: the
    // original called cvResize on a possibly-NULL frame).
    if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
	cvResize(rawImage,rawImage_resized);
    if (tracker.initialized()){
      tracker.update(rawImage_resized);
    }else{
      tracker.initialize(rawImage_resized);
      tracker.m_framecounter=framecounter;
    }

    // START PROCESSING HERE
    {
	  // Previous/current frame pair from the tracker, plus a color copy
	  // to draw the flow vectors on.
	  CvMat * imgA = tracker.m_currImage;
	  IplImage * imgB = tracker.m_nextImage;
	  IplImage * imgC = cvCloneImage(rawImage_resized);
  
	  // Select good features to track in the previous frame and refine
	  // them to sub-pixel accuracy.
	  IplImage * eig_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  IplImage * tmp_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  int corner_count = MAX_CORNERS;
	  CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
	  cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
	  cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),
						 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

	  // Pyramidal Lucas-Kanade; pyrB is reusable from the second frame on.
	  char features_found[ MAX_CORNERS ];
	  float feature_errors[ MAX_CORNERS ];
	  CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_CORNERS ];
	  cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,
							 cornersA,cornersB,corner_count,cvSize( win_size,win_size ),
							 5,features_found,feature_errors,
							 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
							 (framecounter<2)?0:CV_LKFLOW_PYR_B_READY);

	  // Draw each successfully tracked, low-error flow vector.
	  for( int i=0; i<corner_count; i++ ) {
		if( features_found[i]==0|| feature_errors[i]>550 ) {
		  fprintf(stderr,"error=%f\n",feature_errors[i]);continue;
		}
		CvPoint p0 = cvPoint(cvRound( cornersA[i].x ),cvRound( cornersA[i].y ));
		CvPoint p1 = cvPoint(cvRound( cornersB[i].x ),cvRound( cornersB[i].y ));
		cvLine( imgC, p0, p1, CV_RGB(255,0,0), 1 );
	  }

	  cvShowImage("Test",imgC);
	  cvReleaseImage(&imgC);
	  cvReleaseImage(&eig_image);
	  cvReleaseImage(&tmp_image);
	  delete [] cornersA;
	  delete [] cornersB;
	}
	
	// DISPLAY PROCESSING RESULT
	int key = cvWaitKey(delay)&0xff;
	if (key==27){            // ESC quits
	  break;
	}else if (key==' '){     // space toggles pause/play
	  if (delay){ delay = 0; }else{ delay = 30; }
	}else if (key=='f'){ // skip to next frame
	}else if (key=='S'){ // skip forward 10 frames
	  framecounter+=10;fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key=='Q'){ // skip backward 10 frames
	  framecounter=MAX(1,framecounter-10);fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key!=0xff){
	  fprintf(stderr, "Warning: Unknown key press : %c\n", key);
	} // end of key press processing
  } // end of video

  // Release everything we allocated (the original leaked the capture
  // device and the named window).
  cvReleaseCapture(&capture);
  cvDestroyWindow("Test");
  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  cvReleaseImage(&rawImage_resized);

  return 0;
}
Example #15
0
// Function to detect and draw any faces that is present in an image
void detect_and_draw( IplImage* temp )
{
    IplImage *grey = cvCreateImage(cvGetSize(temp), 8, 1);
    cvCvtColor(temp, grey, CV_RGB2GRAY);
    IplImage* face = cvCreateImage(cvSize(100,100), 8, 1);
    IplImage *faces_hist[NUM_FACES];
    int i,j;
    for(i=0;i<NUM_FACES;i++) {
	faces_hist[i] = cvCreateImage(cvSize(100,100), 8, 1);	
    	cvZero(faces_hist[i]);
    }

    cvZero(face);
    // Create two points to represent the face locations
    CvPoint pt1, pt2, e_pt1, e_pt2;

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {

        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence
        CvSeq* faces = cvHaarDetectObjects( grey, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Loop the number of faces found.
        if (faces)
		printf("Number of faces: %d\n", faces->total);
	for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
           // Create a new rectangle for drawing the face
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            // Find the dimensions of the face,and scale it if necessary
            pt1.x = r->x;
            pt2.x = (r->x+r->width);
            pt1.y = r->y;
            pt2.y = (r->y+r->height);

            cvSetImageROI(grey, cvRect(pt1.x, pt1.y, r->width, r->height));


            CvSeq* eyes = cvHaarDetectObjects(grey, cascade_eyes, storage, 1.1, 5, 0, cvSize(25,15));
            printf("Eyes: %p num: %d\n", eyes, eyes->total);
            for( j=0;j < (eyes ? eyes->total : 0); j++ ) {
                CvRect *e = (CvRect*)cvGetSeqElem(eyes, j);
                e_pt1.x = e->x;
                e_pt2.x = (e->x+e->width);
                e_pt1.y = e->y;
                e_pt2.y = (e->y+e->height);
                cvRectangle(grey, e_pt1, e_pt2, CV_RGB(255,255,255), 3, 8, 0);
            }

	    cvResize(grey, face, CV_INTER_LINEAR);
	    cvResetImageROI(grey);

            if (i < NUM_FACES)
		cvEqualizeHist(face, faces_hist[i]);
            
	    // Draw the rectangle in the input image
            cvRectangle( grey, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
        }
    }

    // Show the image in the window named "result"
    //cvShowImage( "result", temp );
    cvShowManyImages("result", 6, temp, grey, face, faces_hist[0], faces_hist[1], faces_hist[2]);

    // Release the temp image created.
    cvReleaseImage( &face );
    cvReleaseImage( &grey );
    for(i=0;i<NUM_FACES;i++)
	cvReleaseImage(&faces_hist[i]);
}
Example #16
0
/**
 * Grab one frame from the camera behind `cc` and return it as a 3-channel
 * IplImage, optionally deinterlaced and undistorted.
 *
 * Ownership: the returned image is owned by `cc` (or the capture device);
 * the caller must NOT release it.
 */
IplImage *
camera_control_query_frame(CameraControl* cc)
{
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    // blocking grab with a 2000 ms timeout
    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#else
    long start = psmove_util_get_ticks();
    result = cvQueryFrame(cc->capture);
    psmove_DEBUG("cvQueryFrame: %ld ms\n", psmove_util_get_ticks() - start);
#endif

#if defined(PSMOVE_USE_DEINTERLACE)
    /**
     * Dirty hack follows:
     *  - Clone image
     *  - Hack internal variables to make an image of all odd lines
     **/
    IplImage *tmp = cvCloneImage(result);
    tmp->imageData += tmp->widthStep; // odd lines
    tmp->widthStep *= 2;
    tmp->height /= 2;

    /**
     * Use nearest-neighbor to be faster. In my tests, this does not
     * cause a speed disadvantage, and tracking quality is still good.
     *
     * This will scale the half-height image "tmp" to the original frame
     * size by doubling lines (so we can still do normal circle tracking).
     **/
    cvResize(tmp, result, CV_INTER_NN);

    /**
     * Need to revert changes in tmp from above, otherwise the call
     * to cvReleaseImage would cause a crash.
     **/
    tmp->height = result->height;
    tmp->widthStep = result->widthStep;
    tmp->imageData -= tmp->widthStep; // odd lines
    cvReleaseImage(&tmp);
#endif

    // undistort image
    // Only done when both remap tables were built during calibration.
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }

    return result;
}
/**
 * process_and_show_images
 *
 * show the different images and save a video file with the results
 *
 */
int
process_and_show_images (const char* name_output_video,
                         track_t list_of_tracks [MAX_CAM - MIN_CAM][MAX_FRAMES])
{

  IplImage *ground_plane_image = NULL, *cam[MAX_CAM - MIN_CAM];
  char cam_name [5][255];
  IplImage *DispImage;
  int current_frame, i, j ,k;
  float x_fused = 0.0, y_fused = 0.0, x_fused_errors = 0.0, y_fused_errors = 0.0;
  CvVideoWriter *writer = NULL;
  CvSize s = cvSize (X_RESOLUTION, Y_RESOLUTION);
  int   contcam[MAX_CAM - MIN_CAM] = {0, 0, 0, 0, 0};
  float acumcam_x[MAX_CAM - MIN_CAM] = {0.0, 0.0, 0.0, 0.0, 0.0};
  float acumcam_y[MAX_CAM - MIN_CAM] = {0.0, 0.0, 0.0, 0.0, 0.0};

#ifdef DEBUG
    printf ("process_and_show_images() \n");
#endif

#ifdef SHOW_IMAGE
    printf ("Creating output video file: %s\n", name_output_video);
    writer = cvCreateVideoWriter (name_output_video, CV_FOURCC ('M', 'J', 'P', 'G'), 15, s, 1);
    //writer = cvCreateVideoWriter (name_output_video, -1, 15, s, 1);
    cvNamedWindow ("Apidis", CV_WINDOW_AUTOSIZE);
    DispImage = cvCreateImage (cvSize (X_RESOLUTION, Y_RESOLUTION), IPL_DEPTH_8U, 3);
#endif

    /* for each frame */
    for (current_frame = 0; current_frame < MAX_FRAMES; current_frame++) {
      printf (".");
      fflush (stdout);
#ifdef SHOW_IMAGE
      sprintf (cam_name[0], "%s-%04d.jpeg", LOCATION_CAM1_IMAGES, current_frame);
      sprintf (cam_name[1], "%s-%04d.jpeg", LOCATION_CAM2_IMAGES, current_frame);
      sprintf (cam_name[2], "%s-%04d.jpeg", LOCATION_CAM4_IMAGES, current_frame);
      /* Note that cam3 and cam4 are mapped to camera 6 and camera 7 images */
      sprintf (cam_name[3], "%s-%04d.jpeg", LOCATION_CAM6_IMAGES, current_frame);
      sprintf (cam_name[4], "%s-%04d.jpeg", LOCATION_CAM7_IMAGES, current_frame);

      for (i = 0; i < MAX_CAM; i++) {
        cam[i] = cvLoadImage (cam_name[i], CV_LOAD_IMAGE_COLOR);
          if (cam[i] == NULL) {
            printf ("Error opening image: %s \n", cam_name[i]);
            exit (-1);
          }
        }

      if (ground_plane_image == NULL)
        ground_plane_image = cvLoadImage (LOCATION_GROUND_PLANE_IMAGE, CV_LOAD_IMAGE_COLOR);

      cvSetImageROI (DispImage, cvRect (0, 0, 400, 300));
#endif
        for (i = 0; i < MAX_CAM; i++) {
          if (track_x (i, current_frame) != 0) { /* if camera 1 in current frame have information */
#ifdef SHOW_IMAGE
            cvRectangle (cam[i], cvPoint ((int) track_x (i, current_frame), (int) track_y (i, current_frame)),
            cvPoint ((int) track_x (i, current_frame) + track_w (i, current_frame),
                     (int) track_y (i, current_frame) + track_h (i, current_frame)), CV_RGB (255,255,0), 2, 8, 0);

            cvRectangle (cam[i], cvPoint ((int) track_x_kalman (i, current_frame), (int) track_y_kalman (i, current_frame)),
            cvPoint ((int) track_x_kalman (i, current_frame) + track_w_kalman (i, current_frame),
                     (int) track_y_kalman (i, current_frame) + track_h_kalman (i, current_frame)), CV_RGB (255,0,0), 2, 8, 0);
#endif
            if (contcam[i] < 5) /* wait for having 5 values */
              contcam[i]++;

            if (contcam[i] == 5) {/* save stats */
              /* get the current mean */
              acumcam_x[i] = 0.0;
              acumcam_y[i] = 0.0;
              for (j = 0; j< 5; j++) {
                acumcam_x[i] += track_px_global_kalman (i, current_frame - j);
                acumcam_y[i] += track_py_global_kalman (i, current_frame - j);
              }
              px_global_mean_5 (i, current_frame) = acumcam_x[i] / (float) contcam[i];
              py_global_mean_5 (i, current_frame) = acumcam_y[i] / (float) contcam[i];

              for (j = 0; j < 5; j++) {
                px_global_var_5 (i, current_frame) +=
                    pow(track_px_global (i, current_frame-j) - px_global_mean_5 (i, current_frame),2);
                py_global_var_5 (i, current_frame) +=
                    pow(track_py_global (i, current_frame-j) - py_global_mean_5 (i, current_frame),2);

              }
              px_global_var_5 (i, current_frame) = px_global_var_5 (i, current_frame) / 5.0;
              py_global_var_5 (i, current_frame) = py_global_var_5 (i, current_frame) / 5.0;
            }
        }
        /* if we lost the objective initialize */
        if (track_x (i, current_frame) == 0)
          contcam[i] = 0;

#ifdef SHOW_IMAGE
        cvResize (cam[i], DispImage, CV_INTER_AREA);
        cvResetImageROI (DispImage);
        if (i == 0)
          cvSetImageROI (DispImage, cvRect (400, 0, 400, 300));
        if (i == 1)
          cvSetImageROI (DispImage, cvRect (800, 0, 400, 300));
        if (i == 2)
          cvSetImageROI (DispImage, cvRect (0, 300, 400, 300));
        if (i == 3)
          cvSetImageROI (DispImage, cvRect (400, 300, 400, 300));
        if (i == 4)
          cvSetImageROI (DispImage, cvRect (800, 300, 400, 300));
#endif
        } /* for each camera */

        x_fused = fused_px_global (current_frame, list_of_tracks);
        y_fused = fused_py_global (current_frame, list_of_tracks);

        x_fused_errors = fused_px_global_errors (current_frame, list_of_tracks);
        y_fused_errors = fused_py_global_errors (current_frame, list_of_tracks);

        /* for each camera, save the fused data in the data structure list_of_tracks */
        for (k = 0; k < (MAX_CAM - MIN_CAM); k++) {
           track_fused_x (k, current_frame) = x_fused;
           track_fused_y (k, current_frame) = y_fused;
         }

        /* save the fused error (difference between filtered data and fused data) */
        for (k = 0; k < (MAX_CAM - MIN_CAM); k++) {
          if (track_py_local (k, current_frame) > 0) {
               track_error_x (k, current_frame) = abs (track_fused_x (k, current_frame) - track_px_global_kalman (k, current_frame));
               track_error_y (k, current_frame) = abs (track_fused_y (k, current_frame) - track_py_global_kalman (k, current_frame));
           }
        }

        /* get the absolute errors between local data and fused data */
        obtain_track_errors (current_frame, list_of_tracks);

#ifdef SHOW_IMAGE
        /* drawing the player positions on the ground plane */

        cvCircle (ground_plane_image, cvPoint (((int)(track_px_global (1, current_frame)/SCALE_X)),
                ((int) (track_py_global(1, current_frame)/SCALE_Y))) , 4, CV_RGB(255, 0, 0), CV_FILLED, CV_AA, 0);

        printf ("Frame:%d, establishing player @: %f,%f --%f,%f --%f,%f\n", current_frame, track_fused_x(0, current_frame),
                 track_fused_y(0, current_frame), x_fused, y_fused, track_px_global(1, current_frame), track_py_global (1, current_frame));

        cvResize (ground_plane_image, DispImage, CV_INTER_AREA);

        cvResetImageROI (DispImage);
        cvShowImage ("Apidis", DispImage);
        cvWriteFrame (writer, DispImage);

        /* free the memory */
        for (i = 0; i < MAX_CAM; i++)
          cvReleaseImage (&cam[i]);

        cvWaitKey(2);
#endif

    } /* for each frame */

    /* get the mean errors of each camera */
    obtain_mean_errors (list_of_tracks);

#ifdef SHOW_IMAGE
    cvReleaseImage (&ground_plane_image);
    cvReleaseVideoWriter (&writer);
    cvDestroyWindow ("Apidis");
#endif

return 0;
}
Example #18
0
int process_image(IplImage* frame, int draw, int print)
{
	int i, j;
	float t;

	uint8_t* pixels;
	int nrows, ncols, ldim;

	#define MAXNDETECTIONS 2048
	int ndetections=0;
	float qs[MAXNDETECTIONS], rs[MAXNDETECTIONS], cs[MAXNDETECTIONS], ss[MAXNDETECTIONS];

	static IplImage* gray = 0;
	static IplImage* pyr[5] = {0, 0, 0, 0, 0};

	/*
		IMPORTANT:
			* these parameters are highly specific for each detection cascade
			  (determine them experimentally)
	*/

	// * this function should be generated with picogen from a detection cascade output by picolrn
	int (*run_detection_cascade)(float*, int, int, int, void*, int, int, int)
		= run_facefinder;

	// * detection quality threshold (must be >= 0.0f)
	// * you can vary the TPR and FPR with this value
	// * if you're experiencing too many false positives, try a larger number here (for example, 7.5f)
	float qthreshold = 5.0f;

	// * how much to rescale the window during the multiscale detection process
	// * increasing this value leads to lower number of detections and higher processing speed
	// * for example, set to 1.2f if you're using pico on a mobile device
	float scalefactor = 1.1f;

	// * how much to move the window between neighboring detections
	// * increasing this value leads to lower number of detections and higher processing speed
	// * for example, set to 0.05f if you want really high recall
	float stridefactor = 0.1f;

	// * coarse image pyramid support
	// * can improve noise and aliasing problems in some applications
	// * set to 1 if pico fails to detect large objects
	int usepyr = 0;

	/*
		...
	*/

	//
	if(!pyr[0])
	{
		//
		gray = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);

		//
		pyr[0] = gray;
		pyr[1] = cvCreateImage(cvSize(frame->width/2, frame->height/2), frame->depth, 1);
		pyr[2] = cvCreateImage(cvSize(frame->width/4, frame->height/4), frame->depth, 1);
		pyr[3] = cvCreateImage(cvSize(frame->width/8, frame->height/8), frame->depth, 1);
		pyr[4] = cvCreateImage(cvSize(frame->width/16, frame->height/16), frame->depth, 1);
	}

	// get grayscale image
	if(frame->nChannels == 3)
		cvCvtColor(frame, gray, CV_RGB2GRAY);
	else
		cvCopy(frame, gray, 0);

	// perform detection with the pico library
	t = getticks();

	if(usepyr)
	{
		int nd;

		//
		pyr[0] = gray;

		pixels = (uint8_t*)pyr[0]->imageData;
		nrows = pyr[0]->height;
		ncols = pyr[0]->width;
		ldim = pyr[0]->widthStep;

		ndetections = find_objects(rs, cs, ss, qs, MAXNDETECTIONS, run_detection_cascade, pixels, nrows, ncols, ldim, scalefactor, stridefactor, MAX(16, minsize), MIN(128, maxsize));

		for(i=1; i<5; ++i)
		{
			cvResize(pyr[i-1], pyr[i], CV_INTER_LINEAR);

			pixels = (uint8_t*)pyr[i]->imageData;
			nrows = pyr[i]->height;
			ncols = pyr[i]->width;
			ldim = pyr[i]->widthStep;

			nd = find_objects(&rs[ndetections], &cs[ndetections], &ss[ndetections], &qs[ndetections], MAXNDETECTIONS-ndetections, run_detection_cascade, pixels, nrows, ncols, ldim, scalefactor, stridefactor, MAX(64, minsize>>i), MIN(128, maxsize>>i));

			for(j=ndetections; j<ndetections+nd; ++j)
			{
				rs[j] = (1<<i)*rs[j];
				cs[j] = (1<<i)*cs[j];
				ss[j] = (1<<i)*ss[j];
			}

			ndetections = ndetections + nd;
		}
	}
	else
	{
Example #19
0
int main (int argc, const char * argv[])
{
    float scale = 1.15;
    int elements = 7;
    
    // Array
    IplImage* images[10];
    
    images[0] = cvLoadImage("megusta.png");
    images[1] = cvLoadImage("foreveralone.png");
    images[2] = cvLoadImage("pokerface.png");
    images[3] = cvLoadImage("trollface.png");
    images[4] = cvLoadImage("fuckyeah.png");
    images[5] = cvLoadImage("gordo.png");
    images[6] = cvLoadImage("yuno.png");
    
    srand(time(NULL));
    int j = (int)rand() % elements;
    
    clock_t actual = clock();
    clock_t anterior = clock();
    
    capture = cvCreateCameraCapture(0);
    IplImage* originalImg;
    
    char *filename = "haarcascade_frontalface_alt.xml";
    
    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
    storage = cvCreateMemStorage( 0 );
    
    cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );
    
    if( capture )
    {
        for(;;)
        {
            actual = clock();
            if(actual - anterior > (5*CLOCKS_PER_SEC)){
                j = (int)rand() % elements;
                anterior = actual;
            }
            
            cvGrabFrame(capture);
            originalImg = cvRetrieveFrame(capture);
            
            if(!originalImg) break;
            
            CvSeq *faces = cvHaarDetectObjects(originalImg,cascade,storage,1.1,3,0,cvSize( 40, 40 ) );
            
            for( int i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
                CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
                
                IplImage *tmp = cvCreateImage(cvSize(r->width*scale, r->height*scale), images[j]->depth,images[j]->nChannels);
                cvResize(images[j], tmp, CV_INTER_CUBIC);
                cvSetImageROI(originalImg,cvRect(r->x-r->width/7,r->y-r->height/15,tmp->width,tmp->height));
                cvAdd(originalImg, tmp, originalImg);
                cvReleaseImage(&tmp);
                cvResetImageROI(originalImg);
                
            }

            cvShowImage("image", originalImg);
            
            char c = cvWaitKey(10);
            if( c == 27 ) break;
            
        }
        cvReleaseCapture(&capture);
    }
    cvDestroyWindow("image");
    
    return 0;
}
Example #20
0
/* GTK "size-allocate" handler for CvImageWidget: records the new
 * allocation, rescales the cached image to fit it (unless the window is
 * in autosize mode), and moves/resizes the underlying GdkWindow once the
 * widget is realized. Works under both GTK2 and GTK3 via GTK_VERSION3. */
static void
cvImageWidget_size_allocate (GtkWidget     *widget,
                        GtkAllocation *allocation)
{
  CvImageWidget *image_widget;

  //printf("cvImageWidget_size_allocate\n");
  g_return_if_fail (widget != NULL);
  g_return_if_fail (CV_IS_IMAGE_WIDGET (widget));
  g_return_if_fail (allocation != NULL);

  /* Store the allocation: GTK3 hides the field behind a setter, GTK2
   * exposes it directly on the widget struct. */
#if defined (GTK_VERSION3)
  gtk_widget_set_allocation(widget, allocation);
#else
  widget->allocation = *allocation;
#endif //GTK_VERSION3
  image_widget = CV_IMAGE_WIDGET (widget);


  /* Resizable window with an image loaded: (re)allocate the scaled copy
   * and resample the original into it. */
  if( (image_widget->flags & CV_WINDOW_AUTOSIZE)==0 && image_widget->original_image ){
      // (re) allocated scaled image
      if( image_widget->flags & CV_WINDOW_NO_IMAGE ){
          /* First image after window creation: size to the image itself. */
          cvImageWidget_set_size( widget, image_widget->original_image->cols,
                                          image_widget->original_image->rows);
      }
      else{
          /* Otherwise follow whatever size GTK allocated to us. */
          cvImageWidget_set_size( widget, allocation->width, allocation->height );
      }
      cvResize( image_widget->original_image, image_widget->scaled_image, CV_INTER_AREA );
  }

  if (gtk_widget_get_realized (widget))
    {
      image_widget = CV_IMAGE_WIDGET (widget);

      /* Autosize (or first-image) mode: force the window to exactly the
       * original image dimensions, overriding GTK's allocation. */
      if( image_widget->original_image &&
              ((image_widget->flags & CV_WINDOW_AUTOSIZE) ||
               (image_widget->flags & CV_WINDOW_NO_IMAGE)) )
      {
#if defined (GTK_VERSION3)
          allocation->width = image_widget->original_image->cols;
          allocation->height = image_widget->original_image->rows;
          gtk_widget_set_allocation(widget, allocation);
#else
          widget->allocation.width = image_widget->original_image->cols;
          widget->allocation.height = image_widget->original_image->rows;
#endif //GTK_VERSION3
          gdk_window_move_resize( gtk_widget_get_window(widget),
              allocation->x, allocation->y,
              image_widget->original_image->cols, image_widget->original_image->rows );
          /* Clear the "no image yet" flag and request one more resize
           * pass now that the real dimensions are known. */
          if(image_widget->flags & CV_WINDOW_NO_IMAGE){
              image_widget->flags &= ~CV_WINDOW_NO_IMAGE;
              gtk_widget_queue_resize( GTK_WIDGET(widget) );
          }
      }
      else{
          /* Normal case: accept the allocation GTK computed. */
          gdk_window_move_resize (gtk_widget_get_window(widget),
                  allocation->x, allocation->y,
                  allocation->width, allocation->height );
      }
    }
}
Example #21
0
/**
 *  buffer header callback function for video
 *
 *  Invoked by MMAL for every filled video buffer. Copies the YUV planes
 *  out of the buffer into pre-allocated IplImages (py/pu/pv, globals),
 *  optionally converts to RGB for display, then recycles the buffer
 *  back to the port's pool.
 *
 * @param port Pointer to port from which callback originated
 * @param buffer mmal buffer header pointer
 */
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
    MMAL_BUFFER_HEADER_T *new_buffer;
    PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;

    if (pData)
    {

        if (buffer->length)
        {

            /* Lock the buffer memory while we read from buffer->data. */
            mmal_buffer_header_mem_lock(buffer);

            //
            // *** PR : OPEN CV Stuff here !
            //
            int w=pData->pstate->width;	// get image size
            int h=pData->pstate->height;
            int h4=h/4;                 // each U/V plane is w*h/4 bytes (4:2:0)

            memcpy(py->imageData,buffer->data,w*h);	// read Y

            if (pData->pstate->graymode==0)
            {
                /* Color path: copy the subsampled chroma planes, upscale
                 * them to full resolution, merge, and convert for display. */
                memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
                memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read v

                cvResize(pu, pu_big, CV_INTER_NN);
                cvResize(pv, pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
                cvMerge(py, pu_big, pv_big, NULL, image);

                cvCvtColor(image,dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
                cvShowImage("camcvWin", dstImage );
            }
            else
            {
                cvShowImage("camcvWin", py); // display only gray channel
            }

            /* Pump the HighGUI event loop so the window actually repaints. */
            cvWaitKey(1);
            nCount++;		// count frames displayed

            mmal_buffer_header_mem_unlock(buffer);
        }
        else vcos_log_error("buffer null");

    }
    else
    {
        vcos_log_error("Received a encoder buffer callback with no state");
    }

    // release buffer back to the pool
    mmal_buffer_header_release(buffer);

    // and send one back to the port (if still open)
    if (port->is_enabled)
    {
        MMAL_STATUS_T status;

        new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

        if (new_buffer)
            status = mmal_port_send_buffer(port, new_buffer);

        /* Note: short-circuit evaluation guarantees 'status' is only read
         * when new_buffer is non-NULL, i.e. after it has been assigned. */
        if (!new_buffer || status != MMAL_SUCCESS)
            vcos_log_error("Unable to return a buffer to the encoder port");
    }

}
Example #22
0
/* Video pipeline thread for the AR.Drone SDK: builds a VP_API pipeline
 * (UDP video input -> optional recorder -> vlib decoder -> YUV2RGB), then
 * loops displaying frames either from the drone stream or from a local
 * webcam (USEWEBCAM), optionally carving out a "bottom camera" sub-image
 * and dumping frames to timestamped JPEG files (CAPTUREIMAGESTREAM).
 *
 * NOTE(review): the webcam CvCapture is never released with
 * cvReleaseCapture, bottomImgStream is never released, and each
 * cvSaveImage call below clones the frame without releasing the clone —
 * a per-saved-frame memory leak. Confirm and fix upstream. */
DEFINE_THREAD_ROUTINE(video_stage, data)
{
  C_RESULT res;

  vp_api_io_pipeline_t    pipeline;
  vp_api_io_data_t        out;
  vp_api_io_stage_t       stages[NB_STAGES];

  vp_api_picture_t picture;

  video_com_config_t              icc;
  vlib_stage_decoding_config_t    vec;
  vp_stages_yuv2rgb_config_t      yuv2rgbconf;
#ifdef RECORD_VIDEO
  video_stage_recorder_config_t   vrc;
#endif
  /// Picture configuration
  picture.format        = PIX_FMT_YUV420P;

  picture.width         = QVGA_WIDTH;
  picture.height        = QVGA_HEIGHT;
  picture.framerate     = 30;

  /* YUV 4:2:0: chroma planes are a quarter of the luma plane. */
  picture.y_buf   = vp_os_malloc( QVGA_WIDTH * QVGA_HEIGHT     );
  picture.cr_buf  = vp_os_malloc( QVGA_WIDTH * QVGA_HEIGHT / 4 );
  picture.cb_buf  = vp_os_malloc( QVGA_WIDTH * QVGA_HEIGHT / 4 );

  picture.y_line_size   = QVGA_WIDTH;
  picture.cb_line_size  = QVGA_WIDTH / 2;
  picture.cr_line_size  = QVGA_WIDTH / 2;

  vp_os_memset(&icc,          0, sizeof( icc ));
  vp_os_memset(&vec,          0, sizeof( vec ));
  vp_os_memset(&yuv2rgbconf,  0, sizeof( yuv2rgbconf ));

  /* Input stage: UDP video socket to the drone. */
  icc.com                 = COM_VIDEO();
  icc.buffer_size         = 100000;
  icc.protocol            = VP_COM_UDP;
  COM_CONFIG_SOCKET_VIDEO(&icc.socket, VP_COM_CLIENT, VIDEO_PORT, wifi_ardrone_ip);

  /* Decoder stage configuration. */
  vec.width               = QVGA_WIDTH;
  vec.height              = QVGA_HEIGHT;
  vec.picture             = &picture;
  vec.block_mode_enable   = TRUE;
  vec.luma_only           = FALSE;

  yuv2rgbconf.rgb_format = VP_STAGES_RGB_FORMAT_RGB24;
#ifdef RECORD_VIDEO
  vrc.fp = NULL;
#endif

  /* Assemble the pipeline stages in processing order. */
  pipeline.nb_stages = 0;

  stages[pipeline.nb_stages].type    = VP_API_INPUT_SOCKET;
  stages[pipeline.nb_stages].cfg     = (void *)&icc;
  stages[pipeline.nb_stages].funcs   = video_com_funcs;

  pipeline.nb_stages++;

#ifdef RECORD_VIDEO
  stages[pipeline.nb_stages].type    = VP_API_FILTER_DECODER;
  stages[pipeline.nb_stages].cfg     = (void*)&vrc;
  stages[pipeline.nb_stages].funcs   = video_recorder_funcs;

  pipeline.nb_stages++;
#endif // RECORD_VIDEO
  stages[pipeline.nb_stages].type    = VP_API_FILTER_DECODER;
  stages[pipeline.nb_stages].cfg     = (void*)&vec;
  stages[pipeline.nb_stages].funcs   = vlib_decoding_funcs;

  pipeline.nb_stages++;

  stages[pipeline.nb_stages].type    = VP_API_FILTER_YUV2RGB;
  stages[pipeline.nb_stages].cfg     = (void*)&yuv2rgbconf;
  stages[pipeline.nb_stages].funcs   = vp_stages_yuv2rgb_funcs;

  pipeline.nb_stages++;

  stages[pipeline.nb_stages].type    = VP_API_OUTPUT_SDL;
  stages[pipeline.nb_stages].cfg     = NULL;
  stages[pipeline.nb_stages].funcs   = vp_stages_output_gtk_funcs;

  pipeline.nb_stages++;

  pipeline.stages = &stages[0];
 
  /* Processing of a pipeline */
  if( !ardrone_tool_exit() )
  {
    PRINT("Video stage thread initialization\n");

    res = vp_api_open(&pipeline, &pipeline_handle);
   // PRINT("VP_API OPENED\n");
    if( SUCCEED(res) )
    {
      int loop = SUCCESS;
      out.status = VP_API_STATUS_PROCESSING;
      cvStartWindowThread();
      #define frontWindow "DroneView"
      cvNamedWindow(frontWindow, CV_WINDOW_AUTOSIZE);
      frontImgStream = cvCreateImage(cvSize(picture.width, picture.height), IPL_DEPTH_8U, 3);

      #define bottomWindow "BomberView"
      if(extractBottomImage)
	cvNamedWindow(bottomWindow, CV_WINDOW_AUTOSIZE);
      bottomImgStream = cvCreateImage(cvSize(bottomWidth, bottomHeight), IPL_DEPTH_8U, 3);

      
      IplImage *frame;
      CvCapture *capture;
      /* Alternative input: local webcam instead of the drone stream. */
      if(USEWEBCAM)
      {
        capture = cvCaptureFromCAM(WEBCAMINDEX);
    
        if(!capture)
        {
          printf("ERROR: Cannot Initialize Webcam.\n");
          loop = !SUCCESS;
        }
      }

      while( !ardrone_tool_exit() && (loop == SUCCESS) )
      {
        if(!USEWEBCAM)
        {
          if( SUCCEED(vp_api_run(&pipeline, &out)) ) {
              /* Skip display until the decoder has produced a frame. */
              if (vec.controller.num_frames==0) continue;
	      /*int i;
              for(i = 0; i < (picture.width)*(picture.height); i++)
              {
                frontImgStream->imageData[i*3] = picture.y_buf[i];
                frontImgStream->imageData[i*3+1] = picture.y_buf[i];
                frontImgStream->imageData[i*3+2] = picture.y_buf[i];
              }
	      */
             cvCvtColor(rgbHeader, frontImgStream, CV_RGB2BGR); //[for colour]

              if(extractBottomImage)
              {
                /* Copy the top-left bottomWidth x bottomHeight region into
                 * bottomImgStream, blacking it out in the front image. The
                 * index expression jumps to the next row once the column
                 * position reaches bottomWidth. */
                int j = 0, i;
                for(i = 0; j < bottomHeight*bottomWidth; i = i%picture.width >= bottomWidth-1 ? i - (i%picture.width) + picture.width : i+1)
                {
                  bottomImgStream->imageData[j*3] = frontImgStream->imageData[i*3];
                  bottomImgStream->imageData[j*3+1] = frontImgStream->imageData[i*3+1];
                  bottomImgStream->imageData[j*3+2] = frontImgStream->imageData[i*3+2];
                  frontImgStream->imageData[i*3] = 0;
                  frontImgStream->imageData[i*3+1] = 0;
                  frontImgStream->imageData[i*3+2] = 0;
                  j++;
                }
              }
              
              //cvLine(frontImgStream, cvPoint(picture.width/2, 0), cvPoint(picture.width/2, picture.height), CV_RGB(0,255,0), 1, CV_AA, 0 );
              cvShowImage(frontWindow, frontImgStream);
              if(extractBottomImage)
              cvShowImage(bottomWindow, bottomImgStream);
              
              if(CAPTUREIMAGESTREAM)
              {
                /* Dump the current frame(s) as <sec>.<usec>.jpg.
                 * NOTE(review): cvCloneImage leaks here — the clone is
                 * never released; cvSaveImage(frontImgStream) would do. */
                char filename[256];
                struct timeval t;
                gettimeofday(&t, NULL);
                sprintf(filename, "%d.%06d.jpg", (int)t.tv_sec, (int)t.tv_usec);
                if(frontImgStream != NULL && cvSaveImage(filename, cvCloneImage(frontImgStream), 0))
                  printf("Image dumped to %s\n", filename);
                else
                  printf("Error dumping image...\n");
                if(extractBottomImage)
                {
                  sprintf(filename, "%d.%06dbottom.jpg", (int)t.tv_sec, (int)t.tv_usec);
                  if(bottomImgStream != NULL && cvSaveImage(filename, cvCloneImage(bottomImgStream), 0))
                    printf("Bottom Image dumped to %s\n", filename);
                  else
                    printf("Error dumping bottom image...\n");
                }
              }
          }
          else loop = -1;
        }
        else //use webcam
        {
          frame = cvQueryFrame(capture);
      
          if(!frame) break;
          
          cvResize(frame, frontImgStream, CV_INTER_LINEAR);
          cvShowImage(frontWindow, frontImgStream);
          
          cvWaitKey(1);
        }
      }

      cvDestroyWindow(frontWindow);
      if(extractBottomImage)
	cvDestroyWindow(bottomWindow);
      //cvReleaseImage(&imgBottom);
      //cvDestroyWindow(bottomWindow);
      cvReleaseImage(&frontImgStream);
      vp_api_close(&pipeline, &pipeline_handle);
    }
  }

  PRINT("   Video stage thread ended\n\n");

  return (THREAD_RET)0;
}
/**
 * Tile up to 12 IplImages into a single window.
 *
 * @param title window title (also used as the HighGUI window name)
 * @param nArgs number of IplImage* arguments that follow (1..12)
 * @param ...   the images; each is shrunk (never enlarged past its cell)
 *              to fit a grid cell and copied into one composite image.
 *
 * Passing a NULL image aborts the call. The composite is released before
 * returning: cvShowImage copies the pixels into the window's internal
 * buffer, so the display survives the release (the original leaked the
 * composite on every call because the release was commented out).
 */
void
MotionCapture::HUD(char* title, int nArgs, ...) {

    // img - the current input image fetched from the variadic arguments
    IplImage *img;

    // DispImage - the composite image all inputs are copied into
    IplImage *DispImage;

    int size;       // cell edge length in pixels
    int i;
    int m, n;       // top-left corner of the current cell
    int x, y;       // width/height of the current input image

    // w - Maximum number of images in a row
    // h - Maximum number of images in a column
    int w, h;

    // scale - shrink factor so the current image fits its cell
    float scale;
    int max;

    // Reject unsupported image counts (must be 1..12).
    if(nArgs <= 0) {
        printf("Number of arguments too small....\n");
        return;
    }
    else if(nArgs > 12) {
        printf("Number of arguments too large....\n");
        return;
    }
    // Choose grid dimensions and cell size from the image count.
    else if (nArgs == 1) {
        w = h = 1;
        size = 300;
    }
    else if (nArgs == 2) {
        w = 2; h = 1;
        size = 300;
    }
    else if (nArgs == 3 || nArgs == 4) {
        w = 2; h = 2;
        size = 350;
    }
    else if (nArgs == 5 || nArgs == 6) {
        w = 3; h = 2;
        size = 200;
    }
    else if (nArgs == 7 || nArgs == 8) {
        w = 4; h = 2;
        size = 200;
    }
    else {
        w = 4; h = 3;
        size = 150;
    }

    // Create the 3-channel composite with a 20px margin around each cell.
    DispImage = cvCreateImage( cvSize( 50 + size*w, 60 + size*h), 8, 3 );

    // Used to get the arguments passed
    va_list args;
    va_start(args, nArgs);

    // Loop for nArgs number of arguments
    for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {

        // Get the Pointer to the IplImage
        img = va_arg(args, IplImage*);

        // A NULL image is a caller error: clean up and bail out.
        if(img == 0) {
            printf("Invalid arguments");
            va_end(args);
            cvReleaseImage(&DispImage);
            return;
        }

        // Find the width and height of the image
        x = img->width;
        y = img->height;

        // The larger dimension determines the shrink factor.
        max = (x > y)? x: y;

        // Find the scaling factor to resize the image
        scale = (float) ( (float) max / size );

        // Wrap to the next grid row when the current one is full.
        if( i % w == 0 && m!= 20) {
            m = 20;
            n+= 20 + size;
        }

        // Set the image ROI to display the current image
        cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));

        // Resize the input image and copy it into the composite.
        cvResize( img, DispImage );

        // Reset the ROI in order to display the next image
        cvResetImageROI(DispImage);
    }

    // Create a new window, and show the Single Big Image
    cvNamedWindow( title, 1 );
    cvShowImage( title, DispImage);

    // End the number of arguments
    va_end(args);

    // Release the composite; the window keeps its own copy of the pixels.
    cvReleaseImage(&DispImage);
}
int main(int argc, char* argv[]){
    cvNamedWindow("Color detection",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Binaria",CV_WINDOW_AUTOSIZE);
    CvCapture* capture=cvCaptureFromCAM(0);
    frame = cvQueryFrame(capture);
    result = cvCreateImage( cvGetSize(frame), 8, 3 );
    IplImage* destination = cvCreateImage( cvGetSize(frame), 8, 3 );
    CvFont font;
    cvInitFont(&font, CV_FONT_VECTOR0, 2.0, 2.0, 0, 3, CV_AA);


    boolean calibratMa = false;
    boolean calibratVenda = false;

    int input;
    while( (input = cvWaitKey(50))!= 118 ) {
        frame = cvQueryFrame(capture);

        if(!calibratMa){
            if(input == 32){
                calibrarMa();
                calibratMa = true;
            }
            else cvRectangle( frame, cvPoint(200, 100), cvPoint(300, 200), CV_RGB(255,0,0), 2 );
            /*Es mostra un rectangle per posar a sobre la mà*/

        }
        else if(!calibratVenda){
            if(input == 32){
                calibrarVenda();
                calibratVenda = true;
            }
            else cvRectangle( frame, cvPoint(200, 250), cvPoint(250, 300), CV_RGB(0,255,255), 2 );
            /*Es mostra un rectangle per posar a sobre la mà*/
        }
        else{
            int step =frame->widthStep;
            int canals = frame->nChannels;
            uchar* imgData = (uchar*) frame->imageData;
            uchar* imgDataResult = (uchar*) result->imageData;

            int num_ma = 0;
            int num_venda = 0;
            int sum_ma[2] = {0,0};
            int sum_venda[2] = {0,0};


            int i = 0, j = 0;
            for( i = 0; i < frame->height; i++)
                for( j = 0; j < frame->width; j++)
                    if( ((imgData[i*step+j*canals]) >= blue[0] && (imgData[i*step+j*canals]) <= blue[1])
                    && ((imgData[i*step+j*canals+1]) >= green[0] && (imgData[i*step+j*canals+1]) <= green[1] )
                    && ((imgData[i*step+j*canals+2]) >= red[0] && (imgData[i*step+j*canals+2]) <= red[1] )){
                        /*Mà*/
                        imgDataResult[i*result->widthStep+j*result->nChannels]=255;
                        imgDataResult[i*result->widthStep+j*result->nChannels+1]=0;
                        imgDataResult[i*result->widthStep+j*result->nChannels+2]=0;
                        num_ma ++;
                        sum_ma[0] += j;
                        sum_ma[1] += i;
                    }

                    else if(((imgData[i*step+j*canals]) >= vblue[0] && (imgData[i*step+j*canals]) <= vblue[1])
                    && ((imgData[i*step+j*canals+1]) >= vgreen[0] && (imgData[i*step+j*canals+1]) <= vgreen[1] )
                    && ((imgData[i*step+j*canals+2]) >= vred[0] && (imgData[i*step+j*canals+2]) <= vred[1] )){
                        /*Venda*/
                        imgDataResult[i*result->widthStep+j*result->nChannels]=255;
                        imgDataResult[i*result->widthStep+j*result->nChannels+1]=255;
                        imgDataResult[i*result->widthStep+j*result->nChannels+2]=0;

                        num_venda++;
                        sum_venda[0] += j;
                        sum_venda[1] += i;
                    }
                    else{/*Background*/
                        imgDataResult[i*result->widthStep+j*result->nChannels]=0;
                        imgDataResult[i*result->widthStep+j*result->nChannels+1]=0;
                        imgDataResult[i*result->widthStep+j*result->nChannels+2]=0;
                    }

        cvSmooth(result, result, CV_MEDIAN, 3, 3 );

        if(num_ma > 0 && num_venda > 0){
            /*S'obté el punt central de la mà i la venda*/
            float mig[2] = {sum_ma[0]/num_ma,sum_ma[1]/num_ma};
            float vmig[2] = {sum_venda[0]/num_venda,sum_venda[1]/num_venda};
            cvRectangle( frame, cvPoint(mig[0], mig[1]), cvPoint(mig[0]+10, mig[1]+10), CV_RGB(0,0,0), -1 );
            cvRectangle( frame, cvPoint(vmig[0], vmig[1]), cvPoint(vmig[0]+10, vmig[1]+10), CV_RGB(0,0,0), -1 );

            /*Es calcula la distància entre els dos punts*/
            double dist = sqrt(pow(mig[0]-vmig[0],2) + pow(mig[1]-vmig[1], 2));

            /*Es normalitza la imatge, s'adapta la seva mida*/
            destination = cvCreateImage(cvSize((int)(frame->width*distOK/dist) , (int)(frame->height*distOK/dist) ),frame->depth, frame->nChannels );
            cvResize(result, destination, CV_INTER_LINEAR);

            int step =destination->widthStep;
            int canals = destination->nChannels;
            uchar* imgData = (uchar*) destination->imageData;

            /*S'obté el nombre de píxels que pertanyen a la mà*/
            int num_ma = 0;
            for( i = 0; i < destination->height; i++)
                for( j = 0; j < destination->width; j++)
                    if((imgData[i*step+j*canals]) == 255
                    && (imgData[i*step+j*canals+1]) == 0
                    && (imgData[i*step+j*canals+2]) == 0 ){
                        num_ma++;
                    }


            /*Comparació del gest actual amb la resta de possibles segons la seva àreea*/
            char num[1];
            strcpy(num, "0");
            if(num_ma > 14000) { /*5 dits*/
                strcpy(num, "5");
            }
            else if(num_ma > 10000) { /*4 dits*/
                strcpy(num, "4");
            }
            else if(num_ma > 7500) { /*3 dits*/
                strcpy(num, "3");
            }
            else if(num_ma > 6000) { /*2 dits*/
                strcpy(num, "2");
            }
            else if(num_ma > 4500) { /*1 dit*/
                strcpy(num, "1");
            }
            else { /*Cap dit*/
                strcpy(num, "0");
            }
            cvPutText(frame, num, cvPoint(10, 50), &font, cvScalar(255, 255, 255, 0));
        }

        }

        cvShowImage("Color detection", frame);
        cvShowImage("Binaria", result);
        }
    cvDestroyAllWindows();
    return 0;
}
// Decode a QR code from the image file 'fileName' and return its text
// content as a wide string; on any failure a (Chinese) error message
// string is returned instead. Also stores version/level into the
// globals QRversion / QRLevel.
// NOTE(review): rd, QR_image and QR_Decode are heap-allocated but never
// deleted, and src/dst leak on the exception path — confirm whether the
// caller tolerates these leaks.
LPWSTR APIENTRY picxdel(char* fileName)
{

	QRcodeImage * QR_image;
	ContentDecoder * QR_Decode;

	BYTE m_CodeData[MAX_MODULESIZE][MAX_MODULESIZE]; // stores the QR module (bit) data

	int m_nCodeSize;// size of the coding region (modules per side)
	//int m_nMoudleSize;// module size
	int m_nVersion;// version number
	//int m_nLevel;// error-correction level
	//POINT leftup;
	//POINT right;
	//POINT down;
	//int nShowSize;
	try{
			if( fileName==NULL)
		{
			// error: file name must not be empty
			LPWSTR QRstring1=_T("文件名不能为空");
		 return QRstring1;
		
		}
	//IplImage *img=cvLoadImage(fileName,CV_LOAD_IMAGE_GRAYSCALE);
	//cvThreshold(img,img,50,255,CV_THRESH_BINARY_INV);
	////cvShowImage(fileName.c_str(),img);
	//cvSaveImage("D:\\1.bmp",img);
	//cvReleaseImage(&img);
IplImage *src=0, *dst=0; 
		src=cvLoadImage(fileName,1);
		if(src==NULL )
		{
			// error: the file could not be loaded
			// NOTE(review): returns a pointer into a local CString's
			// buffer, which is destroyed on return — dangling pointer.
			CString   s   =fileName;
			s=s+"文件载入错误。";

              LPCTSTR   lp   =   (LPCTSTR)(s);
			LPWSTR QRstring1=(LPWSTR)lp;
		 return QRstring1;
		}
		
		// The picture itself has a white border; black out the pixels
		// near the edges first so that the later region crop never
		// produces a rectangle larger than the image.
		for (int y=0;y<src->height;y++)
		{
			uchar *ptr=(uchar*)(src->imageData+y*src->widthStep);
			for (int x=0;x<src->width;x++)
			{
				if(x>3&&x<(src->width-3)&&y>3&&y<(src->height-3)){
					// inside the kept region: leave untouched
				}else{
					ptr[src->nChannels*x]=0;
					ptr[src->nChannels*x+1]=0;
					ptr[src->nChannels*x+2]=0;

				}

			}
		}
		if(src->width<360){
			int scale =3; // upscale small images by a factor of 3
			CvSize dst_cvsize; // target image size
			dst_cvsize.width =scale*src->width;  // target width = scale * source width
			dst_cvsize.height = scale*src->height; // target height = scale * source height
			dst = cvCreateImage(dst_cvsize, src->depth, src->nChannels); // build the target image
		}
		else dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels); // build the target image
		cvResize(src, dst); // scale the source image into the target
	
		RecDect *rd=new RecDect(dst);	// detector object
	rd->RfindSquaresRect(0.1f,0.81f);// first pass: get contour corner coordinates (rough location), used for region selection
	
		/* 'pt' is available now; crop the region and rotate a skewed image upright */
		rd->AreaRotate();
		//cvShowImage("dfdf",rd->img);
		
		int angle=rd->recognVersion(rd->findSquares4() );// extract finder patterns and compute the version; angle is the remaining correction angle
		
		if(angle!=0){// fallback for images the first rotation did not fully align
			rd->FitRotate(rd->img,angle);
			rd->FitRotate(rd->img0,angle);
			rd->RfindSquaresRect(0.36f,0.9);// re-acquire the contour corner coordinates (could be optimized)
			angle=rd->recognVersion(rd->findSquares4() );// extract finder patterns and compute the version again
		}
		
		//rd->drawSquares();
		
		IplImage * loadImage=rd->drawGrid();
		if(loadImage==NULL){
		//	MessageBox(_T("红外QR码转化过程出错"),_T("QR码识别"));
				
		cvReleaseImage( &src );		//// release images
		cvReleaseImage( &dst );
	
		// error: recognition process failed
		LPWSTR QRstring1=_T("识别过程出现异常");
		 return QRstring1;
		}
		cvSaveImage(fileName,loadImage);
		QR_image = new QRcodeImage(loadImage);
		//cvReleaseImage(&loadImage);
		QR_image->GetImageData();

		// module count per side = version * 4 + 17 (QR standard)
		m_nVersion = QR_image->finderPattern->m_version;
		m_nCodeSize = m_nVersion * 4 +17;
		for(int i=0;i<m_nCodeSize;i++)
			for(int j=0;j<m_nCodeSize;j++)
				m_CodeData[i][j]=QR_image->bitMatrix[i][j];
		
	
		
		// decode
		QR_Decode = new ContentDecoder;
		QR_Decode->DecodeData(m_nCodeSize,m_nVersion,m_CodeData);
		QRstring=(QR_Decode->m_strData).GetBuffer(QR_Decode->m_strData.GetLength());
		

		// show the result
		//GetDlgItem(IDC_EDIT_TEXT)->SetWindowText(QR_Decode->m_strData);// (dialog display disabled)
;
		QRversion=QR_Decode->m_nVersion; 
	
		// NOTE(review): 'str' stays uninitialized if m_nLevel matches no
		// case below — QRLevel would then read an indeterminate value.
		char str;
		switch(QR_Decode->m_nLevel)
		{
		case QR_LEVEL_L:str='L';break;
		case QR_LEVEL_M:str='M';break;
		case QR_LEVEL_Q:str='Q';break;
		case QR_LEVEL_H:str='H';break;
		}
		//GetDlgItem(IDC_STATIC_LEVEL)->SetWindowText("纠错等级:"+str);
	    QRLevel=str;
		
		cvReleaseImage( &src );		//// release images
		cvReleaseImage( &dst );
		cvReleaseImage( &loadImage );
		return QRstring;
	}catch(...)
	{
		// error: recognition process threw an exception
		LPWSTR QRstring1=_T("识别过程出现异常");
		 return QRstring1;
	}
		
	
}
Example #26
0
void graphicalJoystick(int joy_idx, SDL_Joystick *joy, FILE* fichierKernel){
	/*
	 * Manual-control mode: shows the camera feed fullscreen with an aiming
	 * cross overlaid, polls the joystick through SDL, and forwards the two
	 * analogue axes to /dev/servoblaster as PWM setpoints.
	 *
	 * NOTE(review): joy_idx and fichierKernel are accepted but never used
	 * here -- confirm whether they can be dropped from the interface.
	 * axes[], buttons[], hats[] and res are globals defined elsewhere.
	 */
	const char* original = "FAJA";
	CvCapture *capture;
	IplImage* cap;
	IplImage* cap_resize;

	capture = cvCreateCameraCapture(CV_CAP_ANY);
	if (!capture) {
		printf("! Error while capturing from camera !\n");
		exit(1);
	}

	CvFont cross, faja;
	cvInitFont(&cross, CV_FONT_HERSHEY_SIMPLEX | CV_FONT_ITALIC, 1, 1, 0, 1, 1);
	cvInitFont(&faja,  CV_FONT_HERSHEY_SIMPLEX | CV_FONT_ITALIC, 1, 1, 0, 2, 3);

	/* cap is owned by the capture device: it must never be released by us.
	 * BUG FIX: the original dereferenced the first frame without checking
	 * that the grab succeeded. */
	cap = cvQueryFrame(capture);
	if (!cap) {
		fprintf(stderr, "! Error: could not query initial frame !\n");
		exit(1);
	}
	cap_resize = cvCreateImage(cvSize(1366, 768), cap->depth, cap->nChannels);

	cvNamedWindow(original, CV_WINDOW_NORMAL);
	cvMoveWindow(original, 0, 0);
	cvSetWindowProperty(original, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);

	int num_axes    = SDL_JoystickNumAxes(joy) - 1;
	int num_buttons = SDL_JoystickNumButtons(joy);
	int num_hats    = SDL_JoystickNumHats(joy);

	FILE* f = fopen("/dev/servoblaster", "w");
	if (f == NULL)
		exit(1);

	int quit = 0;
	SDL_Event event;
	while (!quit)
	{
		SDL_Delay(10);

		bool something_new = false;
		while (SDL_PollEvent(&event)) {
			/* Refresh the fullscreen preview for every pending event. */
			cap = cvQueryFrame(capture);
			cvPutText(cap, "+", cvPoint(cap->width / 2, cap->height / 2), &cross, cvScalar(0, 255, 0, 0));
			cvPutText(cap, "FAJA - Mode manuel", cvPoint(10, 30), &faja, cvScalar(255, 0, 0, 0));
			cvResize(cap, cap_resize, CV_INTER_LINEAR);
			cvShowImage(original, cap_resize);

			something_new = true;
			switch (event.type)
			{
			case SDL_JOYAXISMOTION:
				assert(event.jaxis.axis < num_axes + 1);
				axes[event.jaxis.axis] = event.jaxis.value;
				break;

			case SDL_JOYBUTTONDOWN:
			case SDL_JOYBUTTONUP:
				assert(event.jbutton.button < num_buttons);
				buttons[event.jbutton.button] = event.jbutton.state;
				break;

			case SDL_JOYHATMOTION:
				assert(event.jhat.hat < num_hats);
				hats[event.jhat.hat] = event.jhat.value;
				break;

			case SDL_QUIT:
				quit = 1;
				printf("Recieved interrupt, exiting\n");
				break;

			default:
				fprintf(stderr, "Error: Unhandled event type: %d\n", event.type);
				break;
			}
		}

		if (something_new)
		{
			/* Map the SDL axis range [-32768, 32767] onto the servoblaster
			 * pulse range [50, 240]; axis 0 drives servo 1, axis 1 servo 0. */
			res = 50 + (((-1 * (axes[0]) + 32768) * 190) / 65535);
			fprintf(f, "1=%i\n", (int)res);
			printf("echo 1=%i > /dev/servoblaster\n", (int)res);
			fflush(f);

			res = 50 + (((-1 * (axes[1]) + 32768) * 190) / 65535);
			fprintf(f, "0=%i\n", (int)res);
			printf("echo 0=%i > /dev/servoblaster\n", (int)res);
			fflush(f);
		}
	}

	/* BUG FIX: the original leaked the servoblaster handle, the capture
	 * device and the resized frame, and left the window open on exit. */
	fclose(f);
	cvReleaseImage(&cap_resize);
	cvReleaseCapture(&capture);
	cvDestroyWindow(original);
}
Example #27
0
void* captureThread(void* args)
{
    CvCapture* capture = NULL;
    IplImage* frame;
    IplImage* frame_copy;
    IplImage* gray;
    ChessBoard chess;
    int pointsPerScene;
    CvPoint2D32f* points;
    int pointsNum[1];
    CvMat* cornerPoints;
    CvMat* objectPoints;
    CvMat pointsNumMat;
    
    CvMat* intrinsic = cvCreateMat( 3, 3, CV_64F );
    CvMat* distortion = cvCreateMat( 4, 1, CV_64F );
    CvMat* rotation = cvCreateMat( 3, 3, CV_64F );
    CvMat* translation = cvCreateMat( 3, 1, CV_64F );
    
    loadIntrinsicParams("intrinsic_param_ref.txt", intrinsic, distortion );

    capture = cvCreateCameraCapture(0);

    if(capture == NULL){
	fprintf(stderr, "ERROR: Could not open Camera Device\n");
	exit(1);
    }
    
    frame = cvQueryFrame(capture);
    
    if(frame == NULL){
	fprintf(stderr, "ERROR: Could not query frame\n");
	exit(1);
    }
    
    frame_copy = cvCreateImage(cvGetSize(frame), 
			       frame->depth, 3);

    gray = cvCreateImage(cvGetSize(frame_copy), frame_copy->depth, 1);
    
    cvNamedWindow(captureWinName, CV_WINDOW_AUTOSIZE);
    cvMoveWindow(captureWinName, graphicsWinWidth + 10, 0);

    chess.dx = CHESS_ROW_DX;
    chess.dy = CHESS_COL_DY;
    chess.patternSize.width = CHESS_ROW_NUM;
    chess.patternSize.height = CHESS_COL_NUM;
    
    pointsPerScene = chess.patternSize.width * chess.patternSize.height;
    cornerPoints = cvCreateMat(pointsPerScene, 2, CV_32F);
    objectPoints = cvCreateMat(pointsPerScene, 3, CV_32F);

    pointsNum[0] = pointsPerScene;
    pointsNumMat = cvMat(1, 1, CV_32S, pointsNum);
    
    points = (CvPoint2D32f*)malloc( sizeof(CvPoint2D32f) * pointsPerScene );

    while(1){
	int allPointsFound = 0;
	int detectedPointsNum;
	frame = cvQueryFrame( capture );
	if( !frame ) {
	    fprintf(stderr, "could not query frame\n");
	    exit(1);
	}
	
	cvResize(frame, frame_copy, CV_INTER_NN);
	cvCvtColor(frame_copy, gray, CV_BGR2GRAY);
	if( cvFindChessboardCorners( gray, chess.patternSize, points,
				     &detectedPointsNum,
				     CV_CALIB_CB_ADAPTIVE_THRESH ) ){
	    cvFindCornerSubPix(gray, points, detectedPointsNum,
			       cvSize(5, 5), cvSize(-1, -1),
			       cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1));
	    allPointsFound = 1;
	} else {
	    allPointsFound = 0;
	}

	cvDrawChessboardCorners( frame_copy, chess.patternSize, points,
				 detectedPointsNum, allPointsFound );
	
	if( allPointsFound ){
	    double cameraPosition[3];
	    double cameraOriVec[4];
	    store2DCoordinates( cornerPoints, points, chess, 0 );
	    store3DCoordinates( objectPoints, chess, 0 );
	    calibrate( cornerPoints, objectPoints,
		       intrinsic, distortion, rotation, translation );

	    getCameraPosition(rotation, translation, cameraPosition);
	    printf("cam pos relative to chess board: %.1f, %.1f, %.1f\n", 
		   cameraPosition[0],
		   cameraPosition[1],
		   cameraPosition[2]);
	    convertCv2Gl(cameraPosition, transGL);
	    getCameraOriVec(rotation, rotGL);
	}
	
	cvShowImage( captureWinName, frame_copy);
	if(cvWaitKey(10) == KEY_ESC){
	    exit(1);
	}
    }
    
    free(points);
    cvReleaseMat(&intrinsic);
    cvReleaseMat(&distortion);
    cvReleaseMat(&rotation);
    cvReleaseMat(&translation);
    
    cvReleaseMat(&cornerPoints);
    cvReleaseMat(&objectPoints);
    
    cvDestroyWindow(captureWinName);
    cvReleaseImage(&frame_copy);
    cvReleaseImage(&gray);
    cvReleaseCapture(&capture);
}
Example #28
0
// Main entry point: detect faces from a camera, an AVI, a single image, or
// a text file listing image paths, using a Haar cascade supplied on the
// command line.  Also dispatches the eigenface "train"/"test" sub-commands.
int main( int argc, char** argv )
{
    // Downscale factor applied before processing each camera frame.
    int scale = 2;
    // Structure for getting video from camera or avi
    CvCapture* capture = 0;
    // Images to capture the frame from video or camera or from file
    IplImage *frame = 0, *frame_copy = 0;
    // Length of the "--cascade=" option prefix.
    int optlen = strlen("--cascade=");
    // Input file name for avi or image file.
    const char* input_name;

    // BUG FIX: the original read argv[1] in the "train"/"test" branches
    // even when argc == 1 (argv[1] is NULL there -- undefined behavior).
    if( argc < 2 )
    {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
    }

    // Check for the correct usage of the command line
    if( strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    } else if (strncmp(argv[1], "train", 5) == 0) {
        learn_eigenfaces();
        exit(0);
    } else if (strncmp(argv[1], "test", 4) == 0) {
        recognize_eigenfaces();
        exit(0);
    } else {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
    }

    // Load the HaarClassifierCascade; abort if it cannot be loaded.
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }

    cascade_eyes = (CvHaarClassifierCascade*)cvLoad(cascade_eyes_name, 0, 0, 0 );
    if (!cascade_eyes) {
        fprintf(stderr, "ERROR: failed to load eye classifier cascade\n" );
        return -1;
    }

    // BUG FIX: input_name may legitimately be NULL (camera default);
    // the original called strrchr(NULL, '.') -- undefined behavior.
    const char *ext = input_name ? strrchr(input_name, '.') : NULL;
    // Allocate the memory storage used by the detector.
    storage = cvCreateMemStorage(0);

    // Decide on the input source: camera index, image list, or AVI file.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') ){
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    } else if (ext && strncmp(ext, ".txt", 4) == 0) {
        capture = NULL;  // handled below as a list of image files
    } else
        capture = cvCaptureFromAVI( input_name );

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

    if( capture )
    {
        // Stream frames from the camera / AVI until the source runs dry.
        for(;;)
        {
            if( !cvGrabFrame( capture ))
                break;
            // frame is owned by the capture device; do not release it.
            frame = cvRetrieveFrame( capture, 0 );
            if( !frame )
                break;

            // Lazily allocate the downscaled working copy on first frame.
            if (!frame_copy) {
                printf("Allocate image\n");
                frame_copy = cvCreateImage(cvSize(frame->width/scale, frame->height/scale),
                                           8, 3);
            }
            cvResize(frame, frame_copy, CV_INTER_LINEAR);

            // Detect (and draw) faces on the downscaled frame.
            process_image(frame_copy);
            // Brief pause so the window can refresh.
            cvWaitKey(1);
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }
    else
    {
        still = 1;
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        IplImage* image = NULL;
        printf("%s\n", filename);
        // BUG FIX: guard against a filename with no extension; the original
        // computed strrchr(...) + 1 on a NULL result.
        const char *fext = strrchr(filename, '.');
        if (fext == NULL || strncmp(fext + 1, "txt", 3) != 0) {
            // Single image file: load, process, wait for a key, release.
            image = cvLoadImage( filename, 1 );
            process_image(image);
            cvWaitKey(0);
            cvReleaseImage( &image );
        }
        else
        {
            printf("Not an image\n");
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                while( fgets( buf, 1000, f ) )
                {
                    // Strip trailing whitespace from the line.
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    image = cvLoadImage( buf, 1 );
                    if( image )
                    {
                        // Process each listed image, pausing for user input.
                        process_image(image);
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                fclose(f);
            }
        }
    }

    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successfull execution of the program
    return 0;
}
Example #29
0
int main(int argc, char *argv[])
{
    // Variable to store the keyboard input
    char d;

    // Initialize flags to false. These flags are used for keyboard input (keys q, r, g and b)
    bool Q = false;
    bool R = false;
    bool G = false;
    bool B = false;

    // Set the capture device. Please change the integer value "0" to appropriate value for the capture device.
    CvCapture* capture = cvCaptureFromCAM(0);

    // Allocate memory for all images
    IplImage *src_img;
    IplImage *histogram;
    IplImage *disp_img;
    IplImage *gray_img;
    IplImage *red_img, *red_histogram;
    IplImage *green_img, *green_histogram;
    IplImage *blue_img, *blue_histogram;

    // Initialize historgrams
    int hist_size = 256;
    float range[] = {0,256};
    float *ranges[] = {range};

    /* cvCreateHist function is used to create the histograms. Following is the explanation of the various arguments of the function
    1. First argument: Defines how many dimensions the histogram will have. To keep things simple this tutorial only uses one dimensional histograms.
    2. Second argument: Defines the size of the dimensions. If we were using higher dimensional histograms we would have to pass an array of sizes, one for each dimension, but we are only using one dimension so the array we pass only has one value in it. We choose to make the size of the dimension 256 because the depth of the images we are working with is 8 bit, meaning that we will get one bin for each intensity level. We could have chosen to have less bins but the histogram would have less resolution. We could have chosen more bins but we would not show any more information.
    3. Third argument: Tells OpenCV how to store the data in the histogram. There are two options for this: CV_HIST_ARRAY and CV_HIST_SPARSE. The second option is useful for storing multidimensional histograms that will have most of their bins with counts of zero. Since we are using a one dimensional histogram, we don’t expect sparse data so we choose the first option.
    4. Fourth argument: Used to specify the range of values for each dimension.  Each range is stored as an array of two values, the minimum and maximum value for that dimension. Again we are using one dimension so it looks a bit redundant to make an array of arrays, but only with one array. We have to do this though, because this fourth argument expects an array to 2 value arrays. It is important to know how OpenCV creates the bins based on the range values and the histogram size. OpenCV will take the value supplied in ranges and break it into as many sub intervals as defined in hist_size.
    5. Fifth argument: Defines if the sub intervals are to be split uniformly or not. For this case we definitely want each bin to be the same width as the others so we choose this argument to be 1.
    */
    CvHistogram* hist_red = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
    CvHistogram* hist_blue = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
    CvHistogram* hist_green = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
    double max_value = 0;
    double max_value_red = 0;
    double max_value_green = 0;
    double max_value_blue = 0;
    double find_max = 0;

    // Create the windows
    // "mainWin"  shows the actual captured image
    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("mainWin", 5, 5);
    // "histogramWin" shows the histogram
    cvNamedWindow("histogramWin", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("histogramWin", 435, 5);

    // Print instructions for keyboard input
    printf("RGB Histogram\n\n");
    printf("To toggle red channel ON/OFF press: 'r'\n");
    printf("To toggle green channel ON/OFF press: 'g'\n");
    printf("To toggle blue channel ON/OFF press: 'b'\n");
    printf("To quit press: 'q'\n");

    // Do the following inside while loop forever
    while(1)
    {
        // Clear all max values to 0
        max_value = 0; max_value_red = 0; max_value_green = 0; max_value_blue = 0;

        // Load a bmp image with the histogram axis. Instead of having OpenCV draw the axis, the axis is drawn in paint and then loaded as an image.
        // You can download the picture here: http://uowteaminvincible.files.wordpress.com/2010/02/histogram_scale.png
        // Be sure to change the path below to wherever you choose to save the image.  Also use \\ instead of \ to separate directories.
        histogram = cvLoadImage( "histogram_scale.png" );

        // Initialize three images that will show each histogram
        red_histogram = cvCreateImage( cvGetSize(histogram), IPL_DEPTH_8U, 3 );
        green_histogram = cvCreateImage( cvGetSize(histogram), IPL_DEPTH_8U, 3 );
        blue_histogram = cvCreateImage( cvGetSize(histogram), IPL_DEPTH_8U, 3 );

        // Get the source frame by querying the capture and resize it for display
        src_img=cvQueryFrame(capture);
        disp_img = cvCreateImage(cvSize((src_img->width)/1.6,(src_img->height)/1.6),IPL_DEPTH_8U,3);
        cvResize(src_img,disp_img,CV_INTER_LINEAR);

        // Create 3 single channel images to store each channels data and split the source image into the RGB channels.
        /* Note that each channel is given an image with the same size (width and height) as the source. The depth is 8 bits and these new images have only one channel (since they are storing only one channel). The function cvCvtPixToPlane actually separates the source into its three channels and stores them in the images we just created.*/
        blue_img = cvCreateImage( cvGetSize(src_img), IPL_DEPTH_8U, 1 );
        green_img = cvCreateImage( cvGetSize(src_img), IPL_DEPTH_8U, 1 );
        red_img = cvCreateImage( cvGetSize(src_img), IPL_DEPTH_8U, 1 );
        cvCvtPixToPlane( src_img, blue_img, green_img, red_img, 0 );

        // Calculate a histogram for each channel.
        /*The first argument takes the image we would like to calculate the histogram for. Note that cvCalcHist can only take a one channel image. The next argument says which histogram will be populated. The third argument turns on or off accumulator mode. Since we want the histogram to update for each frame we want to clear the contents before adding new ones and the accumulator mode should be off. The final parameter is a mask that can tell cvCalcHist to unly calculate a certain part of the image. By setting the mask null we calculate for the whole image.*/
        cvCalcHist( &red_img, hist_red, 0, NULL );
        cvCalcHist( &blue_img, hist_blue, 0, NULL );
        cvCalcHist( &green_img, hist_green, 0, NULL );

        // Search through the histograms for their maximum value and store it.
        /* The code for finding the maximum value for the red channel is shown above. The other two channels are very similar and shown in the source code. It is done with a simple for loop that checks every value in each bin. The function used to get the values out of the histogram is cvQueryHistValue_1D. This function is made for getting values out of a one dimensional histogram. Its arguments are the histogram you want to look into, and the bin number you want to see. The reason we need to find this max value is that we will scale the y-axis of the histogram so that it fits on the screen. We do this by dividing each bin value by the max value (this is done in the next step).*/
        for( int i = 0; i < hist_size; i++ )
        {
            find_max = cvQueryHistValue_1D(hist_red,i);
            if (find_max > max_value_red)
            {
                max_value_red = find_max;
            }
        }
        for( int i = 0; i < hist_size; i++ )
        {
            find_max = cvQueryHistValue_1D(hist_green,i);
            if (find_max > max_value_green)
            {
                max_value_green = find_max;
            }
        }
        for( int i = 0; i < hist_size; i++ )
        {
            find_max = cvQueryHistValue_1D(hist_blue,i);
            if (find_max > max_value_blue)
            {
                max_value_blue = find_max;
            }
        }
        // The largest value in all the histograms is found.
        max_value = max(max(max_value_red,max_value_green),max_value_blue);

        // Draw the histogram for each channel, if the flag for that channel is set
        /* First we see an if statement that controls whether or not we draw the histogram. It is based on a flag that is set in the next step by user input. This allows the user to select which channel or combination of channels that they want to see. The next step is the scaling. The function cvScale is used. Its first argument is the source to be scaled and the second is the destination for result. Here we used the same place for source and destination meaning that the bins are scaled and then stored back in the same place. The last argument is the scale factor. A factor of 438/max_value was used because the highest bar that we want to draw is 438 pixels high (so it does not go out of the bounds of the picture we used for the axis). Inside the for loop we see a complicated line of code that draws all the rectangles. The function cvRectangle is used for this. It requires that we pass it which image to draw on, two points to define the rectangles, the colour and line thickness. We specify line thickness as -1 which means a filled rectangle. The last function used is cvAdd which is used to simplify drawind the multiple histograms with overlap. By adding colours on top of each other no extar code is required to deal with the overlap.*/
        if (R)
        {
            cvScale( hist_red->bins, hist_red->bins, 438/max_value);
            for( int i= 0; i < hist_size; i++ )
            {
                cvRectangle( red_histogram, cvPoint(i*3+ 15, 448),cvPoint(i*3+16, 448 - cvRound(cvQueryHistValue_1D(hist_red,i))),cvScalar(0x00,0x00,0xff,0), -1);
            }
            cvAdd(histogram,red_histogram,histogram,0);
        }
        if (G)
        {
            cvScale( hist_green->bins, hist_green->bins, 438/max_value);
            for( int i= 0; i < hist_size; i++ )
            {
                cvRectangle( green_histogram, cvPoint(i*3+ 15, 448),cvPoint(i*3+16, 448 - cvRound(cvQueryHistValue_1D(hist_green,i))),cvScalar(0x00,0xff,0x00,0), -1);
            }
            cvAdd(histogram,green_histogram,histogram,0);
        }
        if (B)
        {
            cvScale( hist_blue->bins, hist_blue->bins, 438/max_value);
            for( int i= 0; i < hist_size; i++ )
            {
                cvRectangle( blue_histogram, cvPoint(i*3+ 15, 448),cvPoint(i*3+16, 448 - cvRound(cvQueryHistValue_1D(hist_blue,i))),cvScalar(0xff,0x00,0x00,0), -1);
            }
            cvAdd(histogram,blue_histogram,histogram,0);
        }

        // Show the images in the windows
        cvShowImage("mainWin", disp_img);
        cvShowImage("histogramWin", histogram);

        // Set flags
        d=cvWaitKey(15);
        /* A simple case statement takes the input from the keyboard and sets the flags accordingly. The R, G and B flags are XOR’ed with 1 to change state each time r, g, or b is pressed. This makes r g and b into toggle switches.*/
        switch (d)
        {
            case 'r':   R = R^1;    break;
            case 'g':   G = G^1;    break;
            case 'b':   B = B^1;    break;
            case 'q':   Q = true;   break;
            default:    break;
        }
        if(Q)break;     //quit program

        // Release the images that we created
        cvReleaseImage(&disp_img );
        cvReleaseImage(&red_img );
        cvReleaseImage(&green_img );
        cvReleaseImage(&blue_img );
        cvReleaseImage(&red_histogram );
        cvReleaseImage(&green_histogram );
        cvReleaseImage(&blue_histogram );
        cvReleaseImage(&histogram );
    }
    return 0;
}
Example #30
0
// Detect the largest face in `img` with the global Haar cascade, tracking
// the face found on the previous call through a static ROI so subsequent
// searches are confined to the neighbourhood of the last hit.
// Outputs the face rectangle (in full-resolution `img` coordinates) into
// `found_face`; width/height are set to 0 when no face is found.
// Relies on file globals: gray, small_img, scale, cascade, storage,
// PAD_FACE, PAD_FACE_2, and the helper mycvHaarDetectObjects.
void detect_and_draw( IplImage* img, CvRect* found_face, CvSize sz )
{
	// ROI of the previously found face, in small_img (downscaled)
	// coordinates; zero width/height means "scan the whole image".
	static CvRect prev;
	
	// Lazily allocate the grayscale and downscaled working buffers once.
	if(!gray) {
		gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
		small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
							 cvRound (img->height/scale)), 8, 1 );
	}

	if(prev.width > 0 && prev.height > 0) {
		// Restrict all three buffers to the padded previous-face region;
		// img and gray are full-resolution, so their ROI is scaled up.
		cvSetImageROI(small_img, prev);

		CvRect tPrev = cvRect(prev.x * scale, prev.y * scale, prev.width * scale, prev.height * scale);
		cvSetImageROI(img, tPrev);
		cvSetImageROI(gray, tPrev);
	} else {
		// No previous face: search the full frame.
		cvResetImageROI(img);
		cvResetImageROI(small_img);
		cvResetImageROI(gray);
	}
	
	// Preprocess: grayscale, downscale, contrast-normalize.
    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

//	for(i=0;i<10;i++) {
//		double t = (double)cvGetTickCount();
		// Detect only the biggest face (rough search) within the ROI.
		CvSeq* faces = mycvHaarDetectObjects( small_img, cascade, storage,
										   1.2, 0, 0
										   |CV_HAAR_FIND_BIGGEST_OBJECT
										   |CV_HAAR_DO_ROUGH_SEARCH
										   //|CV_HAAR_DO_CANNY_PRUNING
										   //|CV_HAAR_SCALE_IMAGE
											 ,sz);
										   //cvSize(30, 30) );
//		t = (double)cvGetTickCount() - t;
//		printf( "detection time = %gms, faces: %d\n", t/((double)cvGetTickFrequency()*1000.), faces->total );
		
	if(faces->total>0) {
		// Take the first (biggest) detection.
		CvRect* r = (CvRect*)cvGetSeqElem( faces, 0 );
		int startX,startY;
		// Detection coordinates are ROI-relative; shift back to
		// small_img coordinates when an ROI was active.
		if(prev.width > 0 && prev.height > 0) {
			r->x += prev.x;
			r->y += prev.y;
		}
		// Build next frame's search ROI: the face padded by PAD_FACE on
		// each side, clamped to the small_img bounds (the w/h/sw/sh terms
		// subtract any overhang past the right/bottom/left/top edges).
		startX = MAX(r->x - PAD_FACE,0);
		startY = MAX(r->y - PAD_FACE,0);
		int w = small_img->width - startX - r->width - PAD_FACE_2;
		int h = small_img->height - startY - r->height - PAD_FACE_2;
		int sw = r->x - PAD_FACE, sh = r->y - PAD_FACE;
		prev = cvRect(startX, startY, 
					  r->width + PAD_FACE_2 + ((w < 0) ? w : 0) + ((sw < 0) ? sw : 0),
					  r->height + PAD_FACE_2 + ((h < 0) ? h : 0) + ((sh < 0) ? sh : 0));
//		printf("found face (%d,%d,%d,%d) setting ROI to (%d,%d,%d,%d)\n",r->x,r->y,r->width,r->height,prev.x,prev.y,prev.width,prev.height);
		// Report the face scaled back up to full-resolution coordinates.
		found_face->x = (int)((double)r->x * scale);
		found_face->y = (int)((double)r->y * scale);
		found_face->width = (int)((double)r->width * scale);
		found_face->height = (int)((double)r->height * scale);
	} else {
		// No face this frame: clear the tracker and the output rectangle.
		prev.width = prev.height = found_face->width = found_face->height = 0;
	}
}