Example #1
void updateImage(){
	IplImage * res = cvCloneImage(birdsview_image);
	if( rectState == 1 || rectState == 2 )
		cvRectangle( res, cvPoint( rect.x, rect.y ), cvPoint( rect.x + rect.width, rect.y + rect.height ), cvScalar(0,255,0,0), 2, 8, 0 );
	cvShowImage( BVwindow, res );
	cvReleaseImage( &res ); // free the clone; otherwise every call leaks one image
}
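updateImage draws on a clone so the stored bird's-eye view stays clean. It depends on globals defined elsewhere (birdsview_image, rect, rectState, BVwindow); a minimal sketch of plausible declarations and the mouse callback that would drive rectState — the names and the state encoding are assumptions, not the original code:

IplImage * birdsview_image = NULL;    // hypothetical: set after computing the bird's-eye view
CvRect rect;                          // rectangle being dragged by the user
int rectState = 0;                    // assumed encoding: 0 = idle, 1 = dragging, 2 = done
const char * BVwindow = "birdsview";

// hypothetical mouse callback, wired with cvSetMouseCallback(BVwindow, onMouse, NULL)
void onMouse(int event, int x, int y, int flags, void * param){
	if(event == CV_EVENT_LBUTTONDOWN){
		rect = cvRect(x, y, 0, 0);
		rectState = 1;                // start dragging
	} else if(event == CV_EVENT_MOUSEMOVE && rectState == 1){
		rect.width  = x - rect.x;     // grow the selection as the mouse moves
		rect.height = y - rect.y;
	} else if(event == CV_EVENT_LBUTTONUP && rectState == 1){
		rectState = 2;                // selection finished
	}
	updateImage();
}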
Example #2
void detect_and_draw(IplImage * img, IplImage * depth, IplImage *faceDepthRet, bool save)
{
	int scale = 1;

	// Create a new image based on the input image
	IplImage *temp =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 8, 3);
	memcpy(temp->imageData, img->imageData, img->imageSize); // assumes scale == 1, so the sizes match

	IplImage *depthTemp =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);
	memcpy(depthTemp->imageData, depth->imageData, depth->imageSize); // assumes scale == 1, so the sizes match

	IplImage *faceDepth =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);

	// Create two points to represent the face locations
	CvPoint pt1, pt2;
	int i, j, k;

	// Clear the memory storage which was used before
	cvClearMemStorage(storage);

	// If the cascade is loaded, run the face detector:
	if (cascade)
	{

		// There can be more than one face in an image. So create a growable
		// sequence of faces.
		// Detect the objects and store them in the sequence
		/* CvSeq* faces = cvHaarDetectObjects( temp, cascade, storage, 1.1, 2, 
		   CV_HAAR_DO_CANNY_PRUNING, cvSize(40, 40) ); */
		CvSeq *faces = cvHaarDetectObjects(temp, cascade, storage,
										   1.6, 2, CV_HAAR_DO_CANNY_PRUNING,
										   cvSize(40, 40));

		// Loop the number of faces found.
		for (i = 0; i < (faces ? faces->total : 0); i++)
		{
			// Create a new rectangle for drawing the face
			CvRect *r = (CvRect *) cvGetSeqElem(faces, i);

			// Find the dimensions of the face, and scale it if necessary
			pt1.x = r->x * scale;
			pt2.x = (r->x + r->width) * scale;
			pt1.y = r->y * scale;
			pt2.y = (r->y + r->height) * scale;

			// Draw the rectangle in the input image
			cvRectangle(temp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);
			cvRectangle(depthTemp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);

			cvSetImageROI(depth,
						  cvRect(pt1.x, pt1.y, r->width * scale,
								 r->height * scale));

			IplImage *faceDepthTemp =
				cvCreateImage(cvGetSize(depth), depth->depth,
							  depth->nChannels);
			IplImage *faceDepthTemp2 =
				cvCreateImage(cvGetSize(depth), 8,
							  depth->nChannels);

			cvCopy(depth, faceDepthTemp, NULL);

			cvResetImageROI(depth);

			// Maximize standard deviation.
			//stretchFaceDepth(faceDepthTemp);

			cvResize(faceDepthTemp, faceDepth);
			cvConvertScale(faceDepthTemp, faceDepthTemp2, 1.0/256.0, 0);

			cvResize(faceDepthTemp2, faceDepthRet);

			cvReleaseImage(&faceDepthTemp);
			cvReleaseImage(&faceDepthTemp2);

			if (save)
			{
				FILE *csvFile = fopen("face.csv", "w");
				if (csvFile)
				{
					for (j = pt1.y; j < pt2.y; j++)
					{
						for (k = pt1.x; k < pt2.x; k++)
						{
							// index rows by widthStep (bytes per row) in case
							// the depth image has row padding
							fprintf(csvFile, "%u,",
									((uint16_t *) (depth->imageData +
												   j * depth->widthStep))[k]);
						}
						fprintf(csvFile, "\n");
					}
					printf("Face captured!\n");
					fclose(csvFile);
				}
			}
		}
	}

	// Show the image in the window named "result"
	cvShowImage("result", temp);
	cvShowImage("resultDepth", depthTemp);
	cvShowImage("faceDepth", faceDepth);

	// Release the temp image created.
	cvReleaseImage(&temp);
	cvReleaseImage(&depthTemp);
	cvReleaseImage(&faceDepth);
}
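detect_and_draw assumes module-level `cascade` and `storage` globals plus a 640x480 BGR frame and a matching 16-bit depth map (Kinect-style). A minimal driver sketch under those assumptions; everything here except detect_and_draw itself is hypothetical:

CvMemStorage *storage = NULL;
CvHaarClassifierCascade *cascade = NULL;

void run_once(IplImage *rgb, IplImage *depth16)
{
	cascade = (CvHaarClassifierCascade *) cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
	storage = cvCreateMemStorage(0);

	// 8-bit buffer that detect_and_draw fills with the scaled face depth
	IplImage *faceDepth8 = cvCreateImage(cvSize(64, 64), 8, 1);

	if (cascade && storage)
		detect_and_draw(rgb, depth16, faceDepth8, false);

	cvReleaseImage(&faceDepth8);
	cvReleaseMemStorage(&storage);
	cvReleaseHaarClassifierCascade(&cascade);
}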
Example #3
void Buoy::TrackObjects(IplImage* imgThresh, IplImage** frame_resized){
    CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
    cvMoments(imgThresh, moments, 1);
    double moment10 = cvGetSpatialMoment(moments, 1, 0);
    double moment01 = cvGetSpatialMoment(moments, 0, 1);
    double area = cvGetCentralMoment(moments, 0, 0); // m00: the number of white pixels

    if(area > 10){
        qDebug() << area;
        char coordinates_red[64];
        char coordinates_yellow[64];
        char coordinates_green[64];
        setXY(moment10/area, moment01/area); // centroid = (m10/m00, m01/m00)
        int posX = getX();
        int posY = getY();
        CvFont myfont;
        cvInitFont(&myfont, CV_FONT_HERSHEY_COMPLEX_SMALL, 0.5, 0.5, 0.0, 1, 8);

        if(pHit > 0.5){
            if(buoyColor == "yellow"){
                found = true;
                sprintf(coordinates_yellow, "YELLOW|X=%d|Y=%d", posX, posY);
                cvGetTextSize(coordinates_yellow, &myfont, myfontSize, &baseline);
                cvRectangle(*frame_resized, cvPoint(posX+5, posY+3), cvPoint(posX+125, posY-8), CV_RGB(0,0,0), CV_FILLED);
                cvPutText(*frame_resized, coordinates_yellow, cvPoint(posX+5, posY), &myfont, CV_RGB(255,255,255));
                cvCircle(*frame_resized, cvPoint(posX, posY), 4, CV_RGB(255,255,0), 2);
            }

            if(buoyColor == "red"){
                sprintf(coordinates_red, "RED|X=%d|Y=%d", posX, posY);
                cvGetTextSize(coordinates_red, &myfont, myfontSize, &baseline);
                cvRectangle(*frame_resized, cvPoint(posX+5, posY+3), cvPoint(posX+105, posY-8), CV_RGB(0,0,0), CV_FILLED);
                cvPutText(*frame_resized, coordinates_red, cvPoint(posX+5, posY), &myfont, CV_RGB(255,255,255));
                cvCircle(*frame_resized, cvPoint(posX, posY), 4, CV_RGB(255,0,0), 2);
            }

            if(buoyColor == "green"){
                sprintf(coordinates_green, "GREEN|X=%d|Y=%d", posX, posY);
                cvGetTextSize(coordinates_green, &myfont, myfontSize, &baseline);
                cvRectangle(*frame_resized, cvPoint(posX+5, posY+3), cvPoint(posX+124, posY-8), CV_RGB(0,0,0), CV_FILLED);
                cvPutText(*frame_resized, coordinates_green, cvPoint(posX+5, posY), &myfont, CV_RGB(255,255,255));
                cvCircle(*frame_resized, cvPoint(posX, posY), 4, CV_RGB(0,255,0), 2);
            }
        }
    }
    else
        found = false;

    free(moments);
}
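The centroid here is the standard moments recipe: x = m10/m00 and y = m01/m00, where the zeroth moment m00 doubles as the area of the thresholded blob. A self-contained sketch of just that computation (generic, not project code):

// Centroid of a binary single-channel image via image moments.
int binaryCentroid(IplImage *binary, CvPoint *out)
{
    CvMoments m;
    cvMoments(binary, &m, 1);                  // 1 = treat nonzero pixels as 1
    double m00 = cvGetSpatialMoment(&m, 0, 0); // blob area in pixels
    if (m00 <= 0) return 0;                    // nothing found
    out->x = (int)(cvGetSpatialMoment(&m, 1, 0) / m00);
    out->y = (int)(cvGetSpatialMoment(&m, 0, 1) / m00);
    return 1;
}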
Example #4
/* main function */
int main(int argc, char** argv){
	if(argc != 4){
		printf("USAGE: ./mmm `PATH-TO/real_gray_tmpl_X.jpg PATH-TO/masked_tmpl_X.jpg PATH-TO/source.jpg`\n");
		exit(1);
	}
	IplImage* src = cvLoadImage(argv[3], -1);
	IplImage* real_tpl = cvLoadImage(argv[1], -1);
	IplImage* mask_tpl = cvLoadImage(argv[2], -1);
	//init_mask(mask_tpl);
	if(!src || !real_tpl || !mask_tpl){
		printf("Failed to load image.\n");
		exit(1);
	}
	IplImage* gray_src = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* sub_res  = cvCreateImage(cvGetSize(real_tpl), IPL_DEPTH_8U, 1);
	cvCvtColor(src, gray_src, CV_BGR2GRAY);
	CvSize src_size = cvGetSize(gray_src);
	CvSize tpl_size = cvGetSize(real_tpl);
	CvPoint region  = cvPoint(src_size.width - tpl_size.width, src_size.height - tpl_size.height);
	int h_i, w_j;
	for(h_i=0;h_i<region.y;h_i++){
		for(w_j=0;w_j<region.x;w_j++){
		    CvRect match_roi = cvRect(w_j, h_i, tpl_size.width, tpl_size.height);
		    cvSetImageROI(gray_src, match_roi);
			double avg = 0.0;
			my_cvAvg(gray_src, real_tpl, sub_res, mask_tpl, &avg);
			//printf("avg: %.f\n", avg);
			if(mp_ptr >= MAX_WM_NUMS){
				printf("water mark number overflow\n");
				exit(-1);
			}
			if(avg < THRESHOLD){
				match_points[mp_ptr].lt_pos = cvPoint(w_j, h_i);
				match_points[mp_ptr].avg      = avg;
				mp_ptr++;
			}
		}
	}
	printf("mp_ptr: %d\n", mp_ptr);
	int i;
	for(i=0;i<mp_ptr;i++){
		CvPoint lt = match_points[i].lt_pos;
		printf("pos: (%d,%d)\n", lt.x, lt.y);
		CvPoint rb = cvPoint(lt.x+tpl_size.width, lt.y+tpl_size.height);
		cvRectangle(src, lt, rb, cvScalar(0,0,255,1), 1, 8, 0);
	}
	
	cvResetImageROI(gray_src); // clear the last ROI set in the scan loop
	show(src);
	/*
	printf("size: %d\n", src->height);
	printf("MIN: %f\n",min);
	printf("min pos: (%d, %d)\n", min_pos.x, min_pos.y);

	cvRectangle(src, min_pos, cvPoint(min_pos.x+tpl_size.width,min_pos.y+tpl_size.height),cvScalar(0,0,255,1),1,8,0);

	cvNamedWindow("match",CV_WINDOW_AUTOSIZE);
	cvShowImage("match",src);
	cvWaitKey(0);
	cvDestroyWindow("match");

	uchar* gsdata = (uchar*)gray_src->imageData;
	uchar* mtdata = (uchar*)mask_tpl->imageData;
	uchar* rtdata = (uchar*)real_tpl->imageData;
	uchar* subdata= (uchar*)sub_res->imageData;

	*/

	cvReleaseImage(&src);
	cvReleaseImage(&real_tpl);
	cvReleaseImage(&mask_tpl);
	cvReleaseImage(&gray_src);
	cvReleaseImage(&sub_res);
}
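my_cvAvg is project-specific and not shown. Given the call site (a template-sized scratch image, a mask, and an output average compared against THRESHOLD), a plausible reconstruction is a masked mean absolute difference; the signature and semantics below are assumptions:

// Hypothetical my_cvAvg: mean absolute difference between the current ROI of
// `img` and `tpl`, counting only pixels where `mask` is nonzero.
void my_cvAvg(IplImage *img, IplImage *tpl, IplImage *scratch,
              IplImage *mask, double *avg)
{
	cvAbsDiff(img, tpl, scratch);      // |ROI(img) - tpl|; ROI size equals the template size
	CvScalar s = cvAvg(scratch, mask); // masked mean
	*avg = s.val[0];
}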
Example #5
void Main::AnalyzePicture(String fileName, String outputPath) {
	String imageFileName = fileName + ".jpg";
//	imageFileName.print();
	IplImage* img = cvLoadImage( imageFileName );
	if (!img) return; // nothing to do if the image failed to load

	//cvNamedWindow( "Output" );
	//cvShowImage( "Output", img );
	//cvWaitKey();
	CvMemStorage* faceStorage = cvCreateMemStorage(0);
	CvMemStorage* eyeStorage = cvCreateMemStorage(0);

	CvHaarClassifierCascade* faceCascade = (CvHaarClassifierCascade*)cvLoad( "haarcascade_frontalface_alt2.xml" );
	CvHaarClassifierCascade* eyeLeftCascade = (CvHaarClassifierCascade*)cvLoad( "eyeLeftCascade.xml" );
	CvHaarClassifierCascade* eyeRightCascade = (CvHaarClassifierCascade*)cvLoad( "eyeRightCascade.xml" );
	double scale = 1.3;

	static CvScalar colors[] = { {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, 
	{{0,255,0}}, {{255,128,0}}, {{255,255,0}}, {{255,0,0}}, {{255,0,255}} };

	CvRect* r;
	// Detect faces
	cvClearMemStorage( faceStorage );
	CvSeq* faces = cvHaarDetectObjects( img, faceCascade, faceStorage, 1.1, 4, 0, cvSize( 40, 50 ));

	String workingFileName;
	// Loop through Faces in picture and draw boxes
	for( int i = 0; i < (faces ? faces->total : 0 ); i++ ){
		r = ( CvRect* )cvGetSeqElem( faces, i );

		cvRectangle( img, cvPoint( r->x, r->y ), cvPoint( r->x + r->width, r->y + r->height ),
			colors[i%8]);

		/* set the Region of Interest; note that the rectangle area has to be __INSIDE__ the image */
		cvSetImageROI(img, cvRect(r->x, r->y, r->width, r->height));

		IplImage *saveFace = cvCreateImage(cvSize(r->width, r->height), 8, 3);
		cvCopy(img, saveFace, NULL);
 
		// Eyes
		CvSeq* eyes; int numEyes = 0;
		cvClearMemStorage( eyeStorage );
		eyes = cvHaarDetectObjects( img, eyeLeftCascade, eyeStorage, 1.1, 4, 0);
		this->drawEyes(eyes, img, true);
		numEyes += eyes->total;

		cvClearMemStorage( eyeStorage );
		eyes = cvHaarDetectObjects( img, eyeRightCascade, eyeStorage, 1.1, 4, 0);
		this->drawEyes(eyes, img, false);
		numEyes += eyes->total;

		//char buffer[50];
		//sprintf(buffer, "Output %ld", i);
		//cvShowImage(buffer, img );
		//cvWaitKey();

		bool accepted = numEyes > 1;
		String suffix = accepted ? "jpg" : "reject.jpg";
		if (accepted) 		
			printf("Extracted face\n");
		else
			printf("Rejected face.\n");

		workingFileName.sprintf("%s\\%s.Face_%ld.markup.%s",outputPath.c_str() ,fileName.c_str(),i,suffix.c_str());
		printf("Writing %s\n", workingFileName.c_str());
		remove(workingFileName.c_str());
		cvSaveImage(workingFileName, img);	// save marked up face

		workingFileName.sprintf("%s\\%s.Face_%ld.%s",outputPath.c_str() ,fileName.c_str(),i,suffix.c_str());
		printf("Writing %s\n", workingFileName.c_str());
		remove(workingFileName.c_str());
		cvSaveImage(workingFileName, saveFace);	// save unmarked face

		cvResetImageROI(img);
		cvReleaseImage( &saveFace );
	}

	//cvNamedWindow( "Output" );
	//cvShowImage( "Output", img );
	//cvWaitKey();
	workingFileName.sprintf("%s\\%s.markup.jpg",outputPath.c_str() ,fileName.c_str());
	printf("Writing %s\n", workingFileName.c_str());
	remove(workingFileName.c_str());
	cvSaveImage(workingFileName, img);		// save marked up picture

	// Release the cascades and storages loaded above, then the image
	cvReleaseHaarClassifierCascade( &faceCascade );
	cvReleaseHaarClassifierCascade( &eyeLeftCascade );
	cvReleaseHaarClassifierCascade( &eyeRightCascade );
	cvReleaseMemStorage( &faceStorage );
	cvReleaseMemStorage( &eyeStorage );
	cvReleaseImage( &img );
}
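drawEyes is not shown; a plausible sketch inferred from the call sites, drawing one rectangle per detection with a different color per side (signature and colors are assumptions):

void Main::drawEyes(CvSeq* eyes, IplImage* img, bool leftEye)
{
	// coordinates are relative to the face ROI still set on img
	CvScalar color = leftEye ? CV_RGB(255, 0, 0) : CV_RGB(0, 0, 255);
	for (int i = 0; i < (eyes ? eyes->total : 0); i++) {
		CvRect* e = (CvRect*)cvGetSeqElem(eyes, i);
		cvRectangle(img, cvPoint(e->x, e->y),
			cvPoint(e->x + e->width, e->y + e->height), color);
	}
}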
Example #6
IplImage* GameScreen::showScene(CvPoint currentPoint, Spell currentSpell){
	IplImage*  workingImage= cvCreateImage(cvSize(640, 480), 8, 3);

	int maxY;
	int maxX;

	if (currentSpell == noSpell)
		cvRectangle(workingImage, cvPoint(0, 0), cvPoint(639, 479), CV_RGB(0, 0, 0), -1, 4);
	else if (currentSpell == greenSpell)
		cvRectangle(workingImage, cvPoint(0, 0), cvPoint(639, 479), CV_RGB(0, 250, 0), -1, 4);
	else if (currentSpell == purpleSpell)
		cvRectangle(workingImage, cvPoint(0, 0), cvPoint(639, 479), CV_RGB(255, 0, 255), -1, 4);
	else if(currentSpell == orangeSpell)
		cvRectangle(workingImage, cvPoint(0, 0), cvPoint(639, 479), CV_RGB(255, 100, 0), -1, 4);

	

	// Blit the monster sprite; near-white pixels (all channels > 220) are
	// treated as transparent. Assumes the sprite fits inside the 640x480 frame.
	for (int y=areaCoordinate.y; y < areaCoordinate.y + currentMonster->height; y++ ) {
		uchar* ptr = (uchar*) (
			workingImage -> imageData + y * workingImage -> widthStep );
		uchar* ptrSmall = (uchar*) (
			currentMonster -> imageData + (y - areaCoordinate.y)  * currentMonster -> widthStep );

		for( int x=areaCoordinate.x;  x < areaCoordinate.x + currentMonster->width; x++ ) {
			if( ptrSmall[3 * (x - areaCoordinate.x)] <= 220 ||
				ptrSmall[3 * (x - areaCoordinate.x) + 1] <= 220 ||
				ptrSmall[3 * (x - areaCoordinate.x) + 2] <= 220 ) {
				ptr[3 * x] = ptrSmall[3 * (x - areaCoordinate.x)];
				ptr[3 * x + 1] = ptrSmall[3 * (x - areaCoordinate.x) + 1];
				ptr[3 * x + 2] = ptrSmall[3 * (x  - areaCoordinate.x) + 2];
			}
		}
	}


	if(currentPoint.y + wand->height > workingImage -> height)
		maxY =  workingImage -> height;
	else
		maxY = currentPoint.y + wand->height;

	if(currentPoint.x + wand->width > workingImage -> width)
		maxX =  workingImage -> width;
	else
		maxX = currentPoint.x + wand->width;


	for (int y=currentPoint.y; y < maxY; y++ ) {
		uchar* ptr = (uchar*) (
			workingImage -> imageData + y * workingImage -> widthStep );
		uchar* ptrSmall = (uchar*) (
			wand -> imageData + (y - currentPoint.y)  * wand -> widthStep );

		for( int x=currentPoint.x; x < maxX; x++ ) {
			// copy the wand pixel unless it is near-white (treated as transparent)
			if( ptrSmall[3 * (x - currentPoint.x)] <= 220 ||
				ptrSmall[3 * (x - currentPoint.x) + 1] <= 220 ||
				ptrSmall[3 * (x - currentPoint.x) + 2] <= 220 ) {
				ptr[3 * x] = ptrSmall[3 * (x - currentPoint.x)];
				ptr[3 * x + 1] = ptrSmall[3 * (x - currentPoint.x) + 1];
				ptr[3 * x + 2] = ptrSmall[3 * (x  - currentPoint.x) + 2];
			}
		}
	}

	return workingImage;
}
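The two pixel-copy loops above duplicate the same white-keyed blit; a sketch of a shared helper (hypothetical, not in the original) that also clamps to the destination bounds the way the wand loop does:

static void blitWhiteKeyed(IplImage* dst, IplImage* sprite, CvPoint origin)
{
	int maxY = origin.y + sprite->height;
	int maxX = origin.x + sprite->width;
	if (maxY > dst->height) maxY = dst->height;
	if (maxX > dst->width)  maxX = dst->width;

	for (int y = origin.y; y < maxY; y++) {
		uchar* d = (uchar*)(dst->imageData + y * dst->widthStep);
		uchar* s = (uchar*)(sprite->imageData + (y - origin.y) * sprite->widthStep);
		for (int x = origin.x; x < maxX; x++) {
			uchar* px = s + 3 * (x - origin.x);
			if (px[0] <= 220 || px[1] <= 220 || px[2] <= 220) { // skip near-white
				d[3 * x]     = px[0];
				d[3 * x + 1] = px[1];
				d[3 * x + 2] = px[2];
			}
		}
	}
}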
Example #7
int main(int argc, char **argv) {
	
	// Prepare log file and check argument count
	FILE* log_file = fopen("track_results.log","a");
	if(argc != 4) {
		fprintf(log_file, "Incorrect number of arguments.\n");
		return 1;
	}

	int desired_fps = atoi(argv[3]);
	if(desired_fps > 60 || desired_fps < 1) {
		fprintf(log_file, "Invalid FPS: please select a value in the range [1-60].\n");
		return 1;
	}

	////////// GROUND TRUTH SETUP AND PROCESSING //////////

	// Open and extract bounding rect info from gt file
	char buffer[100];
	memset(buffer, 0, sizeof(buffer));
	int gt_rect[4];

	FILE* gt_file = fopen(argv[2], "r");
	if(gt_file == NULL) {
		fprintf(log_file, "Could not open ground truth file.\n");
		return 1;
	}
	fgets(buffer, 100, gt_file);

	char* token = strtok(buffer, ",");
	gt_rect[0] = atoi(token);

	int i = 1;
	while(i < 4) {
		token = strtok(NULL, ",");
		gt_rect[i] = atoi(token);
		i++;
	}

	fclose(gt_file);

	// Load image and compress to a reasonable size
	IplImage* gt = cvLoadImage(argv[1]);
	if(gt == NULL) {
		fprintf(log_file, "Could not load ground truth image.\n");
		return 1;
	}

	IplImage* gt_resized = cvCreateImage(cvSize(320, 240), gt->depth, gt->nChannels); //1280,720
	cvResize(gt, gt_resized, CV_INTER_NN);

	// Show bounding rect
	CvPoint corner1 = cvPoint(gt_rect[0], gt_rect[1]);
	CvPoint corner2 = cvPoint(gt_rect[0] + gt_rect[2], gt_rect[1] + gt_rect[3]);
	CvScalar rect_color = CV_RGB(255,0,0);

	cvRectangle(gt_resized, corner1, corner2, rect_color, 2);

	cvNamedWindow( "Ground Truth Reference", CV_WINDOW_AUTOSIZE );
    cvShowImage( "Ground Truth Reference", gt_resized );

	// Set ROI for ground truth
	CvRect quarter = cvRect(gt_rect[0], gt_rect[1], gt_rect[2], gt_rect[3]);
	cvSetImageROI(gt_resized, quarter);

	////////// PREPARE GOPRO FOR VIDEO CAPTURE //////////

	// Basic connectivity tests
	HINTERNET hSession = WinHttpOpen( L"GoPro HTTP Transfer/1.1", 
                              WINHTTP_ACCESS_TYPE_DEFAULT_PROXY,
                              WINHTTP_NO_PROXY_NAME, 
                              WINHTTP_NO_PROXY_BYPASS, 0 ); 

	if(hSession == NULL) {
		printf("Error %u in WinHttpOpen.\n", GetLastError());
		std::cin.get();
		return 1;
	}

	if( !WinHttpSetTimeouts( hSession, 10000, 10000, 10000, 10000 )) {
		printf( "Error %u in WinHttpSetTimeouts.\n", GetLastError());
		std::cin.get();
		return 1;
	}

	HINTERNET hConnect = WinHttpConnect( hSession, L"10.5.5.9", 80, 0);

	if(hConnect == NULL) {
		printf("Error %u in WinHttpConnect.\n", GetLastError());
		std::cin.get();
		return 1;
	}
	
	// Power on
	bool error = ping_request(hConnect, L"/bacpac/PW?t=goprohero&p=%01");
	if(error) {
		return 1;
	}

	Sleep(5000); //give time to boot up

	//Clear memory

	error = ping_request(hConnect, L"/camera/DA?t=goprohero");
	if(error) {
		return 1;
	}

	Sleep(5000); //give time to delete files

	// Set to video mode
	error = ping_request(hConnect, L"/camera/CM?t=goprohero&p=%00");
	if(error) {
		return 1;
	}

	Sleep(1000);

	// Set video resolution to 720p, 30FPS
	error = ping_request(hConnect, L"/camera/VR?t=goprohero&p=%00");
	if(error) {
		return 1;
	}

	Sleep(1000);

	WinHttpCloseHandle(hConnect);
	WinHttpCloseHandle(hSession);

	////////// PREPARE TIMING & VIDEO RESOURCES //////////

	// Prepare timing instrumentation (for FPS control)
	__int64 last_time = 0;
	__int64 current_time = 0;
	__int64 freq = 0;
	int frame_time = 1000 / desired_fps;

	// Play video
	cvNamedWindow( "MOV Window", CV_WINDOW_AUTOSIZE );
	CvCapture* track_video = cvCreateFileCapture( "tags.mov" );
	IplImage* current_frame;

	// Record annotated video
	CvSize write_size = cvSize(
       (int)cvGetCaptureProperty( track_video, CV_CAP_PROP_FRAME_WIDTH),
       (int)cvGetCaptureProperty( track_video, CV_CAP_PROP_FRAME_HEIGHT)
    );	
	CvVideoWriter *writer = cvCreateVideoWriter( "output.avi", CV_FOURCC('M','J','P','G'), 20, write_size, 1);

	// Start timer
	QueryPerformanceCounter((LARGE_INTEGER*) &last_time);

	////////// MAIN PROCESSING LOOP //////////

	bool to_search = true;
	bool next = true;
	CvRect est = quarter;

	while(1) {

		// Read in current frame
		current_frame = cvQueryFrame(track_video);
		if(current_frame == NULL) {
			break;
		}

		if(to_search == false) {
			est = process_frame(gt_resized, current_frame, quarter, &next, log_file);
			rect_color = CV_RGB(0,255,0);
		} else {
			est = search(gt_resized, current_frame, quarter, &next, log_file);
			rect_color = CV_RGB(255,0,0);
		}

		fprintf(log_file, "Coordinates: %d , %d\t\t", est.x, est.y);
		if(to_search) {
			fprintf(log_file, "Recommended Action: Search\n");
		} else {
			// X direction flight planning
			if(est.x < ((current_frame->width / 2) - 10)) {
				fprintf(log_file, "Recommended Action: Move Right , ");
			}
			else if(est.x > ((current_frame->width / 2) + 10)) {
				fprintf(log_file, "Recommended Action: Move Left, ");
			}
			else {
				fprintf(log_file, "Recommended Action: Hover, ");
			}
			
			// Y direction flight planning
			if(est.y < ((current_frame->height / 2) - 10)) {
				fprintf(log_file, "Move Backwards\n");
			}
			else if(est.y > ((current_frame->height / 2) + 10)) {
				fprintf(log_file, "Move Forwards\n");
			}
			else {
				fprintf(log_file, "Hover\n");
			}
		}
		to_search = next;

		// Swap frames
		quarter = est;

		CvPoint corner1 = cvPoint(est.x, est.y);
		CvPoint corner2 = cvPoint(est.x + est.width, est.y + est.height);

		cvRectangle(current_frame, corner1, corner2, rect_color, 2);

		// Display frame
		cvShowImage( "MOV Window", current_frame );
		cvWriteFrame( writer, current_frame );

		// FPS Control
		QueryPerformanceCounter((LARGE_INTEGER*) &current_time);
		QueryPerformanceFrequency((LARGE_INTEGER*) &freq);

		// elapsed time since the previous frame, in milliseconds
		int elapsed_time = (int)(((current_time - last_time) * 1000) / freq);
		int wait_time = frame_time - elapsed_time;
		last_time = current_time;

		if(wait_time < 1) {
			wait_time = 1; // never pass 0: cvWaitKey(0) blocks forever
		}

		char ext_key = cvWaitKey(wait_time);
		if(ext_key == 27) {
			break;
		}

	}

	////////// CLEAN-UP //////////

	cvReleaseCapture( &track_video );
	cvReleaseVideoWriter( &writer );
	cvDestroyWindow( "MOV Window" );

	cvReleaseImage( &gt );
	cvReleaseImage( &gt_resized );

	cvDestroyWindow( "Ground Truth Reference" );

	fclose(log_file);

	return 0;

}
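ping_request is a project helper not shown here. A plausible sketch that issues a bare GET over the already-open WinHTTP connection and returns true on error, matching how the return value is used above; the body is an assumption inferred from the call sites:

bool ping_request(HINTERNET hConnect, LPCWSTR path)
{
	HINTERNET hRequest = WinHttpOpenRequest(hConnect, L"GET", path,
	                                        NULL, WINHTTP_NO_REFERER,
	                                        WINHTTP_DEFAULT_ACCEPT_TYPES, 0);
	if (hRequest == NULL) {
		printf("Error %u in WinHttpOpenRequest.\n", GetLastError());
		return true; // error
	}

	// send the request and wait for the camera's response
	BOOL ok = WinHttpSendRequest(hRequest, WINHTTP_NO_ADDITIONAL_HEADERS, 0,
	                             WINHTTP_NO_REQUEST_DATA, 0, 0, 0)
	       && WinHttpReceiveResponse(hRequest, NULL);

	WinHttpCloseHandle(hRequest);
	if (!ok)
		printf("Error %u sending request.\n", GetLastError());
	return !ok; // callers treat 'true' as error
}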
Example #8
 int main() {
  CvPoint pt1,pt2;
  CvRect regt;
   CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,144);
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,216);	 
  // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a frame within threshold. 
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(30, 25, 150),cvScalar(60, 60, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // create dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs; 
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only  the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob

     cvShowImage( "mywindow", frame); // show output image
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
Example #9
std::list<Garbage*>
GarbageRecognition::garbageList(IplImage * src, IplImage * model) {

    std::list<Garbage*> garbageList;

    //cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
    //object model

    //image for the histogram-based filter
    //could be a parameter

    //~ cvNamedWindow("andImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSimage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSIImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("drawContours",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSThreshImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("threshImage",CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("andSequalizedImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("morphImage",CV_WINDOW_AUTOSIZE);

    utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
    CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

    //~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
    //~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);

    //gets a frame for setting image size
    //CvSize srcSize = cvSize(frameWidth,frameHeight);
    CvSize srcSize = cvGetSize(src);

    //images for HSV conversion
    IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
    IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );


    //Image for thresholding
    IplImage * andImage=cvCreateImage(srcSize,8,1);

    IplImage * andSimage=cvCreateImage(srcSize,8,1);
    IplImage * andSThreshImage=cvCreateImage(srcSize,8,1);
    IplImage * andSequalizedImage=cvCreateImage(srcSize,8,1);
    IplImage * andSIImage=cvCreateImage(srcSize,8,1);

    //Image for thresholding
    IplImage * threshImage=cvCreateImage(srcSize,8,1);

    //image for equalization
    IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

    //image for Morphing operations(Dilate-erode)
    IplImage * morphImage=cvCreateImage(srcSize,8,1);

    //image for image smoothing
    IplImage * smoothImage=cvCreateImage(srcSize,8,1);

    //image for contour-finding operations
    IplImage * contourImage=cvCreateImage(srcSize,8,3);


    int frameCounter=1;
    int cont_index=0;

    //convolution kernel for morph operations
    IplConvKernel* element;

    CvRect boundingRect;

    //contours
    CvSeq * contours;

    //main processing: this function handles a single frame

    //convert image to hsv
    cvCvtColor( src, hsv, CV_BGR2HSV );
    cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );


    /*I(x,y)blue ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3]
    I(x,y)green ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+1]
    I(x,y)red ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+2]*/

    // keep only pixels whose hue/saturation fall in the target band
    for(int y=0; y<srcSize.height; y++) {
        for(int x=0; x<srcSize.width; x++) {
            uchar * hue=&((uchar*) (h_plane->imageData+h_plane->widthStep*y))[x];
            uchar * sat=&((uchar*) (s_plane->imageData+s_plane->widthStep*y))[x];
            if(*hue>20 && *hue<40 && *sat>60)
                *hue=255;
            else
                *hue=0;
        }
    }
    cvAnd(h_plane, v_plane, andImage);
    cvAnd(h_plane, s_plane, andSimage);


    //apply morphologic operations
    element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
                                            MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
                                            CV_SHAPE_RECT, NULL);


    cvDilate(andImage,morphImage,element,MORPH_DILATE_ITER);
    cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);


    cvThreshold(morphImage,threshImage,120,255,CV_THRESH_BINARY);

    //get all contours
    contours=myFindContours(threshImage);
    //contours=myFindContours(smoothImage);


    cont_index=0;

    cvCopy(src,contourImage,0);



    while(contours!=NULL) {

        CvSeq * aContour=getPolygon(contours);
        utils::Contours * ct = new Contours(aContour);




        //apply filters


        if( ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER) &&
                ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA) &&
                //ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) &&
                ct->boxAreaFilter(BOXFILTER_TOLERANCE) &&
                //ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN)&&
                1) {



            //get contour bounding box
            boundingRect=cvBoundingRect(ct->getContour(),0);
            cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
                        cvPoint(boundingRect.x+boundingRect.width,
                                boundingRect.y+boundingRect.height),
                        _GREEN,1,8,0);


            //if passed filters
            ct->printContour(3,cvScalar(127,127,0,0),
                             contourImage);

            std::vector<int> centroid(2);
            centroid=ct->getCentroid();


            //build garbage List
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
                    boundingRect.y,boundingRect.width,boundingRect.height);

            utils::Garbage * aGarbage = new utils::Garbage(r,centroid);

            garbageList.push_back(aGarbage);


        }

        delete ct;
        cvReleaseMemStorage( &aContour->storage );
        contours=contours->h_next;
        cont_index++;
    }

    cvShowImage("drawContours",contourImage);
    // cvWaitKey(0);
    delete h;


    cvReleaseHist(&testImageHistogram);
    cvReleaseStructuringElement(&element);
    //cvReleaseMemStorage( &contours->storage );
    cvReleaseImage(&threshImage);
    cvReleaseImage(&equalizedImage);
    cvReleaseImage(&morphImage);
    cvReleaseImage(&smoothImage);
    cvReleaseImage(&contourImage);

    cvReleaseImage(&hsv);
    cvReleaseImage(&h_plane);
    cvReleaseImage(&s_plane);
    cvReleaseImage(&v_plane);
    cvReleaseImage(&andImage);
    cvReleaseImage(&andSimage);
    cvReleaseImage(&andSThreshImage);
    cvReleaseImage(&andSequalizedImage);
    cvReleaseImage(&andSIImage);



    return garbageList;
}
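myFindContours and getPolygon are project helpers not shown. Rough sketches consistent with how their results are used above (walking h_next, releasing aContour->storage per contour); the details are assumptions:

CvSeq * myFindContours(IplImage * binary)
{
    // note: cvFindContours modifies the input image
    CvMemStorage * storage = cvCreateMemStorage(0);
    CvSeq * contours = NULL;
    cvFindContours(binary, storage, &contours, sizeof(CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    return contours; // caller walks the h_next chain
}

CvSeq * getPolygon(CvSeq * contour)
{
    // polygonal approximation into its own storage, so the caller can
    // release it per contour as the loop above does
    CvMemStorage * storage = cvCreateMemStorage(0);
    return cvApproxPoly(contour, sizeof(CvContour), storage,
                        CV_POLY_APPROX_DP, 3, 0);
}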
Example #10
// chain function - this function does the actual processing
static GstFlowReturn
gst_bgfg_acmmm2003_chain(GstPad *pad, GstBuffer *buf)
{
    GstBgFgACMMM2003 *filter;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_BGFG_ACMMM2003(GST_OBJECT_PARENT(pad));

    filter->image->imageData = (gchar*) GST_BUFFER_DATA(buf);

    // the bg model must be initialized with a valid image; thus we delay its
    // creation until the chain function
    if (filter->model == NULL) {
        filter->model = cvCreateFGDStatModel(filter->image, NULL);

        ((CvFGDStatModel*)filter->model)->params.minArea           = filter->min_area;
        ((CvFGDStatModel*)filter->model)->params.erode_iterations  = filter->n_erode_iterations;
        ((CvFGDStatModel*)filter->model)->params.dilate_iterations = filter->n_dilate_iterations;

        return gst_pad_push(filter->srcpad, buf);
    }

    cvUpdateBGStatModel(filter->image, filter->model, -1);

    // send mask event, if requested
    if (filter->send_mask_events) {
        GstStructure *structure;
        GstEvent     *event;
        GArray       *data_array;
        IplImage     *mask;

        // prepare and send custom event with the mask surface
        mask = filter->model->foreground;
        data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
        g_array_append_vals(data_array, mask->imageData, mask->imageSize);

        structure = gst_structure_new("bgfg-mask",
                                      "data",      G_TYPE_POINTER, data_array,
                                      "width",     G_TYPE_UINT,    mask->width,
                                      "height",    G_TYPE_UINT,    mask->height,
                                      "depth",     G_TYPE_UINT,    mask->depth,
                                      "channels",  G_TYPE_UINT,    mask->nChannels,
                                      "timestamp", G_TYPE_UINT64,  GST_BUFFER_TIMESTAMP(buf),
                                      NULL);

        event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
        gst_pad_push_event(filter->srcpad, event);
        g_array_unref(data_array);

        if (filter->display) {
            // shade the regions not selected by the acmmm2003 algorithm
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
            cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
        }
    }

    if (filter->send_roi_events) {
        CvSeq        *contour;
        CvRect       *bounding_rects;
        guint         i, j, n_rects;

        // count # of contours, allocate array to store the bounding rectangles
        for (contour = filter->model->foreground_regions, n_rects = 0;
             contour != NULL;
             contour = contour->h_next, ++n_rects);

        bounding_rects = g_new(CvRect, n_rects);

        for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
            bounding_rects[i] = cvBoundingRect(contour, 0);

        for (i = 0; i < n_rects; ++i) {
            // skip collapsed rectangles
            if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;

            for (j = (i + 1); j < n_rects; ++j) {
                // skip collapsed rectangles
                if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;

                if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
                    bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
                    bounding_rects[j] = NULL_RECT;
                }
            }
        }

        for (i = 0; i < n_rects; ++i) {
            GstEvent     *event;
            GstStructure *structure;
            CvRect        r;

            // skip collapsed rectangles
            r = bounding_rects[i];
            if ((r.width == 0) || (r.height == 0)) continue;

            structure = gst_structure_new("bgfg-roi",
                                          "x",         G_TYPE_UINT,   r.x,
                                          "y",         G_TYPE_UINT,   r.y,
                                          "width",     G_TYPE_UINT,   r.width,
                                          "height",    G_TYPE_UINT,   r.height,
                                          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
                                          NULL);

            event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
            gst_pad_send_event(filter->sinkpad, event);

            if (filter->verbose)
                GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
                         r.x, r.y, r.width, r.height);

            if (filter->display)
                cvRectangle(filter->image, cvPoint(r.x, r.y), cvPoint(r.x + r.width, r.y + r.height),
                            CV_RGB(0, 0, 255), 1, 8, 0);
        }

        g_free(bounding_rects);
    }

    if (filter->display)
        gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);

    return gst_pad_push(filter->srcpad, buf);
}
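rect_overlap, rect_collapse and NULL_RECT come from elsewhere in the plugin. Sketches of plausible definitions matching their use in the merge loop above (assumptions, not the original code):

#define NULL_RECT cvRect(0, 0, 0, 0)

// true if the two rectangles intersect
static gboolean rect_overlap(CvRect a, CvRect b)
{
    return (a.x < b.x + b.width)  && (b.x < a.x + a.width) &&
           (a.y < b.y + b.height) && (b.y < a.y + a.height);
}

// smallest rectangle containing both inputs
static CvRect rect_collapse(CvRect a, CvRect b)
{
    int x1 = MIN(a.x, b.x);
    int y1 = MIN(a.y, b.y);
    int x2 = MAX(a.x + a.width,  b.x + b.width);
    int y2 = MAX(a.y + a.height, b.y + b.height);
    return cvRect(x1, y1, x2 - x1, y2 - y1);
}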
Example #11
static void putDistance(IplImage *Image,
                        std::vector<cv_tracker::image_rect_ranged> objects,
                        int threshold_height,
                        const char* objectLabel)
{
  char distance_string[32];
  CvFont dfont;
  float hscale	    = 0.7f;
  float vscale	    = 0.7f;
  float italicscale = 0.0f;
  int	thickness   = 1;

  CvFont      dfont_label;
  float       hscale_label = 0.5f;
  float       vscale_label = 0.5f;
  CvSize      text_size;
  int         baseline     = 0;

  cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
  cvGetTextSize(objectLabel,
                &dfont_label,
                &text_size,
                &baseline);

  for (unsigned int i=0; i<objects.size(); i++)
    {
      if (objects.at(i).rect.y > threshold_height) // temporary workaround to avoid drawing detections in the sky
        {
          if (!isNearlyNODATA(objects.at(i).range))
            {
              /* put label */
              CvPoint labelOrg = cvPoint(objects.at(i).rect.x - OBJ_RECT_THICKNESS,
                                         objects.at(i).rect.y - baseline - OBJ_RECT_THICKNESS);

              cvRectangle(Image,
                          cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                          cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                          CV_RGB(0, 0, 0), // label background is black
                          -1, 8, 0
                          );
              cvPutText(Image,
                        objectLabel,
                        labelOrg,
                        &dfont_label,
                        CV_RGB(255, 255, 255) // label text color is white
                        );

              /* put distance data */
              cvRectangle(Image,
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 45),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 5),
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) + (((int)log10(objects.at(i).range/100)+1) * 8 + 38),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 30),
                          cv::Scalar(255,255,255),
                          -1);

              cvInitFont (&dfont,
                          CV_FONT_HERSHEY_COMPLEX,
                          hscale,
                          vscale,
                          italicscale,
                          thickness,
                          CV_AA);

              sprintf(distance_string, "%.2f m", objects.at(i).range / 100); //unit of length is meter
              cvPutText(Image,
                        distance_string,
                        cvPoint(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 40),
                                objects.at(i).rect.y + objects.at(i).rect.height + 25),
                        &dfont,
                        CV_RGB(255, 0, 0));
            }
        }
    }
}
Example #12
 int main()  
 {  
     CBlobResult blobs;    
     CBlob *currentBlob;   
     CvPoint pt1, pt2;  
     CvRect cvRect;  
     int key = 0;  
     IplImage* frame = 0;  
   
     // Initialize capturing live feed from video file or camera
     CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );

     // Can't get the device? Complain and quit
     if( !capture )
     {
         printf( "Could not initialize capturing...\n" );
         return -1;
     }

     // Get the frames per second; fall back to 25 if the file reports none,
     // so the cvWaitKey( 2000 / fps ) below never divides by zero
     int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
     if( fps <= 0 )
         fps = 25;
   
     // Windows used to display input video with bounding rectangles  
     // and the thresholded video  
     cvNamedWindow( "video" );  
     cvNamedWindow( "thresh" );        
   
     // An infinite loop  
     while( key != 'x' ) 
     { 
         // If we couldn't grab a frame... quit  
         if( !( frame = cvQueryFrame( capture ) ) )  
             break;        
   
         // Get object's thresholded image (blue = white, rest = black)  
         IplImage* imgThresh = GetThresholdedImageHSV( frame );        
   
         // Detect the white blobs from the black background  
         blobs = CBlobResult( imgThresh, NULL, 0 );    
   
         // Exclude white blobs smaller than the given value (10)    
         // The bigger the last parameter, the bigger the blobs need    
         // to be for inclusion    
         blobs.Filter( blobs,  
                       B_EXCLUDE,  
                       CBlobGetArea(),  
                       B_LESS,  
                       10 );           
   
         // Attach a bounding rectangle for each blob discovered  
         int num_blobs = blobs.GetNumBlobs();  
   
         for ( int i = 0; i < num_blobs; i++ )    
         {                 
             currentBlob = blobs.GetBlob( i );               
             cvRect = currentBlob->GetBoundingBox();  
   
             pt1.x = cvRect.x;  
             pt1.y = cvRect.y;  
             pt2.x = cvRect.x + cvRect.width;  
             pt2.y = cvRect.y + cvRect.height;  
   
             // Attach bounding rect to blob in original video input
             cvRectangle( frame,  
                          pt1,   
                          pt2,  
                          cvScalar(0, 0, 0, 0),  
                          1,  
                          8,  
                          0 );  
         }  
   
         // Show the thresholded and original images
         cvShowImage( "thresh", imgThresh );  
         cvShowImage( "video", frame );  
   
         // Optional - used to slow up the display of frames  
         key = cvWaitKey( 2000 / fps );  
   
         // Prevent memory leaks by releasing thresholded image  
         cvReleaseImage( &imgThresh );        
     }  
   
     // We're through with using camera.   
     cvReleaseCapture( &capture );  
   
     return 0;  
 }  
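GetThresholdedImageHSV is defined elsewhere; a typical implementation that maps the tracked color (blue, per the comment above) to white and everything else to black. The exact HSV band is an assumption:

IplImage* GetThresholdedImageHSV( IplImage* frame )
{
    IplImage* hsv = cvCreateImage( cvGetSize( frame ), 8, 3 );
    cvCvtColor( frame, hsv, CV_BGR2HSV );

    IplImage* thresh = cvCreateImage( cvGetSize( frame ), 8, 1 );
    // hue ~100-140 covers blue in OpenCV's 0-179 hue range
    cvInRangeS( hsv, cvScalar( 100, 100, 100 ), cvScalar( 140, 255, 255 ), thresh );

    cvReleaseImage( &hsv );
    return thresh; // the caller releases it
}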
Example #13
int WINAPI WinMain(HINSTANCE hThisInstance, HINSTANCE hPrevInstance, LPSTR lpszArgs, int nWinMode)
{
	// variables for storing images
	IplImage *frame = 0, *image = 0, *hsv = 0, *dst = 0, *dst2 = 0, *color_indexes = 0, *dst3 = 0, *image2 = 0, *tmp = 0;
	int key = 0, zx = 0, zy = 0;

	// load a picture from a file
	IplImage *menu = cvLoadImage("menu.png");
	// create the project's main window
	cvNamedWindow("Проект OpenCV");
	cvShowImage("Проект OpenCV",menu);
	cvMoveWindow("Проект OpenCV",100,50);

	// grab any connected web camera
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);

    // frame rate
	double fps = 18;
	// initialize video recording to a file; the 4-character codec code is built with the CV_FOURCC macro
	CvVideoWriter *writer = cvCreateVideoWriter("record.avi", CV_FOURCC('I','Y','U','V'), fps, cvSize(640, 480), 1);

	if (!capture)
		return 0;
	else
	{
		while(key != 27)
		{
			// grab the current frame
			frame = cvQueryFrame(capture);
			// clone it for processing
			image = cvCloneImage(frame);

			// zoom
			if(key=='+')
			{
					zx = zx + 4;
					zy = zy + 3;
			}
			if(key=='-')
			{
					zx = zx - 4;
					zy = zy - 3;
			}
			if(zx > 300)
			{
					zx = 300;
					zy = 225;
			}
			if(zx < 0)
			{
					zx = 0;
					zy = 0;
			}

			// set the ROI width and height
			int zwidth = frame->width-2*zx; 
			int zheight = frame->height-2*zy;

			// set the ROI (Region Of Interest)
			cvSetImageROI(frame, cvRect(zx,zy,zwidth,zheight));
			// copy the region of interest into image2
			image2 = cvCloneImage(frame); 
			// create an empty 640x480 image
			tmp = cvCreateImage( cvSize(640, 480), frame->depth, frame->nChannels );
			// stretch the ROI onto the empty image tmp
			cvResize(image2, tmp, 0);

			// write the frame to the video file
            cvWriteFrame(writer, tmp);

			// reset the ROI
			cvResetImageROI(frame);

			// initialize the font
			CvFont font;
			cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX,1.0, 1.0, 0, 1, CV_AA);
			// draw text onto the image using the font
			cvPutText(tmp, "press '+' to increase", cvPoint(150, 40), &font, CV_RGB(150, 0, 150) );
			cvPutText(tmp, "press '-' to reduce", cvPoint(165, 450), &font, CV_RGB(150, 0, 150) );

			// number of pixels of each color type in the image
			uint colorCount[NUM_COLOR_TYPES] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

			hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			cvCvtColor( image, hsv, CV_BGR2HSV );

			// images for storing results
			dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
			color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // for storing color indices

			// for storing RGB colors
			CvScalar rgb_colors[NUM_COLOR_TYPES];

			int i=0, j=0, x=0, y=0;

			// zero out the colors
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					rgb_colors[i] = cvScalarAll(0);
			}

			for (y=0; y<hsv->height; y++) {
					for (x=0; x<hsv->width; x++) {

							// get the pixel's HSV components
							uchar H = CV_PIXEL(uchar, hsv, x, y)[0];        // Hue
							uchar S = CV_PIXEL(uchar, hsv, x, y)[1];        // Saturation
							uchar V = CV_PIXEL(uchar, hsv, x, y)[2];        // Value (Brightness)

							// determine which color type these values belong to
							int ctype = getPixelColorType(H, S, V);

							// set that color in the debug image
							CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];  // Hue
							CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];  // Saturation
							CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];  // Value

							// accumulate the RGB components
							rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
							rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
							rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

							// remember which color type this pixel belongs to
							CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

							// count it
							colorCount[ctype]++;
					}
			}

			// average the RGB components
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					if (colorCount[i] == 0) continue; // avoid dividing by zero for unused color types
					rgb_colors[i].val[0] /= colorCount[i];
					rgb_colors[i].val[1] /= colorCount[i];
					rgb_colors[i].val[2] /= colorCount[i];
			}

			// now put the array into a vector and sort it
			std::vector< std::pair< int, uint > > colors;
			colors.reserve(NUM_COLOR_TYPES);

			for(i=0; i<NUM_COLOR_TYPES; i++){
					std::pair< int, uint > color;
					color.first = i;
					color.second = colorCount[i];
					colors.push_back( color );
			}
		
			// sort by pixel count
			std::sort( colors.begin(), colors.end(), colors_sort );

			// show the colors
			cvZero(dst2);
			int h = dst2->height;
			int w = dst2->width / RECT_COLORS_SIZE;
			for(i=0; i<RECT_COLORS_SIZE; i++ ){
					cvRectangle(dst2, cvPoint(i*w, 0), cvPoint(i*w+w, h), rgb_colors[colors[i].first], -1);
			}

			// show the image rendered in the detected colors
			dst3 = cvCloneImage(image);
			for (y=0; y<dst3->height; y++) {
					for (x=0; x<dst3->width; x++) {
							int color_index = CV_PIXEL(uchar, color_indexes, x, y)[0];

							CV_PIXEL(uchar, dst3, x, y)[0] = rgb_colors[color_index].val[0];
							CV_PIXEL(uchar, dst3, x, y)[1] = rgb_colors[color_index].val[1];
							CV_PIXEL(uchar, dst3, x, y)[2] = rgb_colors[color_index].val[2];
					}
			}

			// convert the debug image back to RGB
			cvCvtColor( dst, dst, CV_HSV2BGR );

			cvSetMouseCallback("Проект OpenCV", ClickOnMenu, (void*) menu);

			if(flag_1 == 1)
			{
				cvNamedWindow("Веб-камера", CV_WINDOW_AUTOSIZE);
				cvShowImage("Веб-камера", image);
			}
			else cvDestroyWindow("Веб-камера");
			if(flag_2 == 1)
			{
				cvNamedWindow("Zoom", CV_WINDOW_AUTOSIZE);
				cvShowImage("Zoom", tmp);
			}
			else cvDestroyWindow("Zoom");
			if(flag_3 == 1)
			{
				cvNamedWindow("Обнаруженные цвета");
				cvShowImage("Обнаруженные цвета", dst2);
			}
			else cvDestroyWindow("Обнаруженные цвета");
			if(flag_4 == 1)
			{
				cvNamedWindow("Изображение в обнаруженных цветах");
				cvShowImage("Изображение в обнаруженных цветах", dst3);
			}
			else cvDestroyWindow("Изображение в обнаруженных цветах");
			if(flag_5 == 1)
			{
				cvNamedWindow("Из HSV в RGB");
				cvShowImage("Из HSV в RGB", dst);
			}
			else cvDestroyWindow("Из HSV в RGB");
	
			// release resources
			cvReleaseImage(&hsv);
			cvReleaseImage(&dst);
			cvReleaseImage(&dst2);
			cvReleaseImage(&color_indexes);
			cvReleaseImage(&dst3);
			cvReleaseImage(&image);
			cvReleaseImage(&image2);
			cvReleaseImage(&tmp);

			if(flag_exit == 1)
			{
				cvReleaseCapture(&capture);
				cvReleaseVideoWriter(&writer); // close the video file
				return 0;
			}

			// exit the loop if ESC was pressed
			key = cvWaitKey(1);
		}

		// release the variables initialized earlier
		cvReleaseCapture(&capture);
		cvReleaseVideoWriter(&writer);

	}
    return 0;
}
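The example relies on helpers defined elsewhere (CV_PIXEL, getPixelColorType, colors_sort, and the flag_* globals set by the ClickOnMenu callback). Plausible sketches of the two generic ones, given how they are used above; both are assumptions, not the original code:

// CV_PIXEL: common pointer-arithmetic macro; returns a pointer to channel 0
// of pixel (x, y), so [0]/[1]/[2] index the channels (an assumed definition)
#define CV_PIXEL(type, img, x, y) \
    (((type*)((img)->imageData + (y) * (img)->widthStep)) + (x) * (img)->nChannels)

// colors_sort: comparison used by std::sort above; presumably orders color
// types by descending pixel count
bool colors_sort(const std::pair<int, uint>& a, const std::pair<int, uint>& b)
{
	return a.second > b.second;
}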
Example #14
// Parameters:
// img - input video frame; dst - detection result
void Invade::update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
	double timestamp = clock() / 100.; // get the current timestamp
	CvSize size = cvSize(img->width, img->height); // get current frame size
	int i, idx1, idx2;
	IplImage* silh;
	IplImage* pyr = cvCreateImage(cvSize((size.width & -2) / 2, (size.height & -2) / 2), 8, 1);
	CvMemStorage *stor;
	CvSeq *cont;

	/* initialize the data first */
	if (!mhi || mhi->width != size.width || mhi->height != size.height)
	{
		if (buf == 0) // if not initialized yet, allocate the ring buffer
		{
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset(buf, 0, N*sizeof(buf[0]));
		}

		for (i = 0; i < N; i++)
		{
			cvReleaseImage(&buf[i]);
			buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
			cvZero(buf[i]); // clear buffer frame at the beginning
		}
		cvReleaseImage(&mhi);
		mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
		cvZero(mhi); // clear MHI at the beginning
	} // end of if(mhi)

	/* convert the current frame to grayscale, into the last slot of the buffer */
	cvCvtColor(img, buf[last], CV_BGR2GRAY);

	/* set the frame indices */
	idx1 = last;
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame
	last = idx2;

	// frame difference
	silh = buf[idx2]; // the difference is written into idx2
	cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames

	// binarize the difference image
	cvThreshold(silh, silh, 50, 255, CV_THRESH_BINARY);

	// drop silhouettes that have timed out, updating the motion history image
	cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI

	cvConvert(mhi, dst); // convert mhi into dst: dst = mhi

	// median filter to remove small noise
	cvSmooth(dst, dst, CV_MEDIAN, 3, 0, 0, 0);

	cvPyrDown(dst, pyr, CV_GAUSSIAN_5x5); // downsample to remove noise; the result is a quarter of the original
	cvDilate(pyr, pyr, 0, 1); // dilate to close small holes in the targets
	cvPyrUp(pyr, dst, CV_GAUSSIAN_5x5); // upsample back to the original size

	// the section below finds the contours
	// create dynamic structure and sequence
	stor = cvCreateMemStorage(0);
	cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), stor);

	// find all contours
	cvFindContours(dst, stor, &cont, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	// draw the bounding rectangle stored in each contour
	for (; cont; cont = cont->h_next)
	{
		CvRect r = ((CvContour*)cont)->rect;
		if (r.height * r.width > CONTOUR_MAX_AERA) // discard rectangles with small area
		{
			cvRectangle(img, cvPoint(r.x, r.y),
				cvPoint(r.x + r.width, r.y + r.height),
				CV_RGB(255, 0, 0), 1, CV_AA, 0);
		}
	}
	// free memory
	cvReleaseMemStorage(&stor);
	cvReleaseImage(&pyr);
}
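update_mhi relies on members and constants defined elsewhere (mhi, buf, last, N, MHI_DURATION, CONTOUR_MAX_AERA). Plausible declarations consistent with the code above; the concrete values are assumptions:

class Invade {
public:
	void update_mhi(IplImage* img, IplImage* dst, int diff_threshold);
private:
	enum { N = 4 };       // length of the frame ring buffer
	IplImage** buf;       // ring buffer of grayscale frames (N entries)
	IplImage*  mhi;       // motion history image (32F), created lazily
	int        last;      // index of the most recent frame in buf
};

const double MHI_DURATION = 1.0;  // seconds of motion history to keep
const int CONTOUR_MAX_AERA = 16;  // minimum bounding-box area worth drawing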
Example #15
// Function to detect and draw any faces present in an image
void detect_and_draw( IplImage* img )
{

    // Create memory for calculations
    static CvMemStorage* storage = 0;

    // Create a new Haar classifier
    static CvHaarClassifierCascade* cascade = 0;

    static int scale = 1;

    // Create a new image based on the input image
    IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );

    // Create two points to represent the face locations
    CvPoint pt1, pt2;
    static int i;

    // Load the HaarClassifierCascade (only on the first call, since it is static)
    if( !cascade )
        cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return;
    }
    
    // Allocate the memory storage (once; it is cleared before each detection)
    if( !storage )
        storage = cvCreateMemStorage(0);

    // Create a new named window with title: result
    //cvNamedWindow( "result", CV_WINDOW_AUTOSIZE );

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {
		//printf("go cascade\n");
        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence
        CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );
		//printf("ici\n");
        // Loop the number of faces found.
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
			//printf("Et d'un\n");
           // Create a new rectangle for drawing the face
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            // Find the dimensions of the face,and scale it if necessary
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;

            // Draw the rectangle in the input image
            cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
          
        }
    }

	//printf("nombre de visages: %d\n",i);
    // Show the image in the window named "result"
    //cvShowImage( "result", img );
	
    // Release the temp image created.
    cvReleaseImage( &temp );
}
Example #16
int _tmain(int argc, _TCHAR* argv[])
{
	TVAInitParams params;
	TVAHumanInit hparams;
	TVAHumanResult result;
	result.objects = new TVAHumanBlob[cNumObjects];
	result.num     = 0;
	if (!LoadInitParams("init.xml", &params))
	{
		printf("Cannot load params.\n");
		return 0;
	}

	hparams.maxHeight = 90;
	hparams.maxWidth  = 30;
	hparams.minHeight = 30;
	hparams.minWidth  = 10;
	hparams.numObects = cNumObjects;

	HANDLE hHuman = humanCreate(&params, &hparams);
	if (!hHuman)
		return 0;

	CvCapture* capture = NULL;

	if (argc < 2)
		capture = cvCaptureFromCAM(0);
	else
		capture = cvCaptureFromFile(argv[1]);

	if (capture == NULL)
	{
		printf("%s\n", "Cannot open camera.");
		return -1;
	}


	for (;;) 
	{
		IplImage* frame = NULL;
		frame = cvQueryFrame(capture);
		if (!frame)
			break;

		humanProcess(hHuman, frame->width, frame->height, frame->nChannels, (unsigned char*)frame->imageData, &result);
		if (result.num > 0)
		{

			for (int i = 0; i < result.num; i++)
			{
				
				CvScalar color;
				color = CV_RGB(200, 200, 200);

				CvPoint p1,p2;

				p1.x = result.objects[i].XPos;
				p1.y = result.objects[i].YPos;

				p2.x = p1.x + result.objects[i].Width;
				p2.y = p1.y + result.objects[i].Height;

				cvRectangle(frame, p1, p2, color);
			}
		}

		cvShowImage(_MODULE_, frame);

		int c;
		c = cvWaitKey(10);
		if ((char)c == 27)
			break;

	}
	humanRelease(&hHuman);
	delete[] result.objects; // allocated with new[], so use delete[] rather than free()
	cvReleaseCapture(&capture);
	return 0;
}
Example #17
void process_image(){

	///////////////////////////////////////////////////////
	//////////////////// PUPIL ////////////////////////////
	///////////////////////////////////////////////////////

	int numBins = 256;
	float range[] = {0, 255};
	float *ranges[] = { range };

	CvHistogram *hist = cvCreateHist(1, &numBins, CV_HIST_ARRAY, ranges, 1);
	cvClearHist(hist);

	cvCalcHist(&smooth, hist, 0, 0);
	IplImage* imgHist = DrawHistogram(hist, 1, 1);
	cvClearHist(hist);
	//cvShowImage("hist", imgHist);
	cvReleaseImage(&imgHist); // the histogram plot is only used for debugging
	cvReleaseHist(&hist);

	cvThreshold(smooth, pupil, 50, 255, CV_THRESH_BINARY);
	//cvShowImage("pupil_binary", pupil);

	cvCanny(pupil, pedge, 40, 50);
	//cvShowImage("pupil_edge", pedge);


	//////////////////////////////////////////////////////////
	////////////////////// IRIS //////////////////////////////
	//////////////////////////////////////////////////////////

	//cvEqualizeHist(smooth, smooth);
	//cvShowImage("Equalized", smooth);

	cvThreshold(smooth, iris, 100, 255, CV_THRESH_BINARY); //115
	//cvShowImage("iris_binary", iris);

	//cvSobel(iris, iedge, 1, 0, 3);
	cvCanny(iris, iedge, 1, 255);
	//cvShowImage("iris_edge", iedge);


	/////////////////////////////////////////////////////////
	/////////////////////// Eyelids //////////////////////////
	/////////////////////////////////////////////////////////

	cvThreshold(smooth, eyelid_mask, 150, 255, CV_THRESH_OTSU);
	cvNot(eyelid_mask, eyelid_mask);
	//cvShowImage("eyelid", eyelid_mask);

	//cvAdaptiveThreshold(smooth, contour, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 9, 1);
	//cvThreshold(smooth, contour, 130, 255, CV_THRESH_BINARY);
	//cvShowImage("contour", contour);

	//CvSeq* firstContour = NULL;
	//CvMemStorage* cstorage = cvCreateMemStorage(0);
	//cvFindContours(con, cstorage, &firstContour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
	//cvDrawContours(dst, firstContour, CV_RGB(0,255,0), CV_RGB(0,0,255), 10, 2, 8);



	CvMemStorage* storage_pupil = cvCreateMemStorage(0);

	// Hough transform on the pupil edge map; min_dist = src->width, so at
	// most one circle comes back
	CvSeq* presults = cvHoughCircles(pedge, storage_pupil, CV_HOUGH_GRADIENT, 2, src->width, 255, 1);
	for( int i = 0; i < presults->total; i++ )
	{
		float* p = (float*) cvGetSeqElem( presults, i );
		CvPoint pt = cvPoint( cvRound( p[0] ), cvRound( p[1] ) );

		xp = cvRound( p[0] );
		yp = cvRound( p[1] );
		rp = p[2];

		cvCircle(dst, pt, cvRound( p[2] ), CV_RGB(0,255,255), 1, 8);

		// square search window for the iris, centered on the pupil
		xroi = xp - shift;
		yroi = yp - shift;

		cvRectangle(dst, cvPoint(p[0]-shift, p[1]-shift), cvPoint(p[0]+shift, p[1]+shift), CV_RGB(255,0,255), 1);

		CvRect roi = cvRect(xroi, yroi, shift*2, shift*2);
		cvSetImageROI( iedge, roi );
		//cvShowImage("ROI", iedge);
	}
	cvReleaseMemStorage(&storage_pupil);
CvMemStorage* storage_iris = cvCreateMemStorage(0);

CvSeq* iresults = cvHoughCircles(iedge,storage_iris,CV_HOUGH_GRADIENT,2,src->width,1,50,50);
for( int i = 0; i < iresults->total; i++ )
{
float* p = (float*) cvGetSeqElem( iresults, i );

CvPoint pt = cvPoint( cvRound( p[0] )+xroi,cvRound( p[1] )+yroi );
cvCircle(dst,pt,cvRound( p[2] ),CV_RGB(255,0,0),1,400);


xi=cvRound( p[0] )+xroi;
yi=cvRound( p[1] )+yroi;
ri=(p[2]);


cvCircle(iris_mask,pt,cvRound( p[2] ),CV_RGB(255, 255, 255),-1, 8, 0);
//cvShowImage("iris_mask",iris_mask);


}
///////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
///////////////////////////////////////////


cvResetImageROI(iedge);

cvAnd(dst,dst,res,iris_mask);
//cvShowImage("iris_mask",res);

cvAnd(res,res, mask, eyelid_mask);
//cvShowImage("Mask",mask);



//cvLogPolar(mask,finalres,cvPoint2D32f (xp,yp),100, CV_INTER_LINEAR  );
//cvShowImage("Final Result",finalres);


/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
/*




*/



}
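
The two cvHoughCircles calls above do the real work: the pupil is found on the whole edge map, and the iris is then searched only inside a square ROI centred on the pupil. For reference, a minimal, self-contained sketch of a single such circle search follows (the file name, the Gaussian smoothing, and the threshold values are illustrative assumptions, not values taken from the code above):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
	// Hypothetical input: an 8-bit grayscale eye image.
	IplImage* gray = cvLoadImage("eye.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	if (!gray) return -1;

	// Smooth first: cvHoughCircles is sensitive to noise.
	cvSmooth(gray, gray, CV_GAUSSIAN, 5, 5, 0, 0);

	CvMemStorage* storage = cvCreateMemStorage(0);

	// dp = 2, min_dist = image width: report at most one dominant circle.
	// param1 is the Canny high threshold, param2 the accumulator threshold.
	CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT,
	                                2, gray->width, 100, 30, 0, 0);
	if (circles->total > 0) {
		float* p = (float*) cvGetSeqElem(circles, 0);
		cvCircle(gray, cvPoint(cvRound(p[0]), cvRound(p[1])),
		         cvRound(p[2]), cvScalarAll(255), 1, 8, 0);
	}

	cvSaveImage("eye_annotated.jpg", gray);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&gray);
	return 0;
}

Note that this sketch feeds the grayscale image to cvHoughCircles and lets it run Canny internally, whereas process_image() passes a precomputed edge map; both appear in practice, but the internal-Canny form is the documented usage.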
Example #18
// chain function; this function does the actual processing
static GstFlowReturn
gst_surf_tracker_chain(GstPad *pad, GstBuffer *buf) {
    GstSURFTracker *filter;
    GstClockTime    timestamp;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_SURF_TRACKER(GST_OBJECT_PARENT(pad));
    filter->image->imageData = (char*) GST_BUFFER_DATA(buf);

    // Create the gray image for the surf 'features' search process
    cvCvtColor(filter->image, filter->gray, CV_BGR2GRAY);
    ++filter->frames_processed;
    timestamp = GST_BUFFER_TIMESTAMP(buf);

    // If stored_objects is non-empty: look for matches, update them, clean out stale ones
    if ((filter->stored_objects != NULL) && (filter->stored_objects->len > 0)) {
        CvMemStorage *surf_image_mem_storage;
        CvSeq        *surf_image_keypoints, *surf_image_descriptors;
        guint         i;
        gint          j;

        // Update the match set 'features' for each object
        surf_image_mem_storage = cvCreateMemStorage(0);

        // Search 'features' in full image
        surf_image_keypoints = surf_image_descriptors = NULL;
        cvExtractSURF(filter->gray, NULL, &surf_image_keypoints, &surf_image_descriptors,
                      surf_image_mem_storage, filter->params, 0);

        for (i = 0; i < filter->stored_objects->len; ++i) {
            InstanceObject *object;
            GArray         *pairs;

            object = &g_array_index(filter->stored_objects, InstanceObject, i);
            pairs  = g_array_new(FALSE, FALSE, sizeof(IntPair));

            findPairs(object->surf_object_keypoints, object->surf_object_descriptors,
                      surf_image_keypoints, surf_image_descriptors, pairs);

            // if match, update object
            if (pairs->len && (float) pairs->len / object->surf_object_descriptors->total >= MIN_MATCH_OBJECT) {
                object->range_viewed++;
                object->last_frame_viewed = filter->frames_processed;
                object->timestamp         = timestamp;

                if (object->surf_object_keypoints_last_match != NULL)
                    cvClearSeq(object->surf_object_keypoints_last_match);
                object->surf_object_keypoints_last_match = getMatchPoints(surf_image_keypoints, pairs, 1, object->mem_storage);

                if (object->surf_object_descriptors_last_match != NULL)
                    cvClearSeq(object->surf_object_descriptors_last_match);
                object->surf_object_descriptors_last_match = getMatchPoints(surf_image_descriptors, pairs, 1, object->mem_storage);

                // Estimate rect of objects localized
                object->rect_estimated = rectDisplacement(object->surf_object_keypoints, surf_image_keypoints, pairs, object->rect, PAIRS_PERC_CONSIDERATE);
            }

            g_array_free(pairs, TRUE);
        }

        if (surf_image_keypoints != NULL) cvClearSeq(surf_image_keypoints);
        if (surf_image_descriptors != NULL) cvClearSeq(surf_image_descriptors);
        cvReleaseMemStorage(&surf_image_mem_storage);

        // Clean old objects
        for (j = filter->stored_objects->len - 1; j >= 0; --j) {
            InstanceObject *object;

            object = &g_array_index(filter->stored_objects, InstanceObject, j);
            if ((filter->frames_processed - object->last_frame_viewed > DELOBJ_NFRAMES_IS_OLD) ||
                (filter->frames_processed != object->last_frame_viewed && object->range_viewed < DELOBJ_COMBOFRAMES_IS_IRRELEVANT)) {
                if (object->surf_object_keypoints != NULL) cvClearSeq(object->surf_object_keypoints);
                if (object->surf_object_descriptors != NULL) cvClearSeq(object->surf_object_descriptors);
                if (object->surf_object_keypoints_last_match != NULL) cvClearSeq(object->surf_object_keypoints_last_match);
                if (object->surf_object_descriptors_last_match != NULL) cvClearSeq(object->surf_object_descriptors_last_match);
                cvReleaseMemStorage(&object->mem_storage);
                g_array_remove_index_fast(filter->stored_objects, j);
            }
        }

    } // if any object exist

    // Process all haar rects
    if ((filter->rect_array != NULL) && (filter->rect_array->len > 0)) {
        guint i, j;

        for (i = 0; i < filter->rect_array->len; ++i) {
            CvRect rect = g_array_index(filter->rect_array, CvRect, i);

            // If the rect already matches an object in 'stored_objects', update
            // its features. Otherwise save it as a new object.
            for (j = 0; j < filter->stored_objects->len; ++j) {
                InstanceObject *object;

                object = &g_array_index(filter->stored_objects, InstanceObject, j);

                // It is considered the same object if the centroid of the matched
                // features lies inside the haar rect AND the smaller/larger area
                // ratio is at least PERC_RECT_TO_SAME_OBJECT
                if (pointIntoRect(rect, (object->surf_object_keypoints_last_match != NULL) ? surfCentroid(object->surf_object_keypoints_last_match, cvPoint(0, 0)) : surfCentroid(object->surf_object_keypoints, cvPoint(0, 0))) &&
                    ((float) MIN((object->rect.width * object->rect.height), (rect.width * rect.height)) / (float) MAX((object->rect.width * object->rect.height), (rect.width * rect.height)) >= PERC_RECT_TO_SAME_OBJECT)) {

                    // Update the object features according to the new body rect
                    cvSetImageROI(filter->gray, rect);
                    cvExtractSURF(filter->gray, NULL, &object->surf_object_keypoints, &object->surf_object_descriptors,
                                  object->mem_storage, filter->params, 0);
                    cvResetImageROI(filter->gray);
                    object->rect = object->rect_estimated = rect;
                    object->last_body_identify_timestamp = timestamp;

                    break;
                }
            }

            // If new, create object and append in stored_objects
            if (j >= filter->stored_objects->len) {
                InstanceObject object;

                object.surf_object_keypoints   = 0;
                object.surf_object_descriptors = 0;
                object.mem_storage             = cvCreateMemStorage(0);

                cvSetImageROI(filter->gray, rect);
                cvExtractSURF(filter->gray, NULL, &object.surf_object_keypoints, &object.surf_object_descriptors,
                              object.mem_storage, filter->params, 0);
                cvResetImageROI(filter->gray);

                if (object.surf_object_descriptors && object.surf_object_descriptors->total > 0) {
                    object.id                                 = filter->static_count_objects++;
                    object.last_frame_viewed                  = filter->frames_processed;
                    object.range_viewed                       = 1;
                    object.rect                               = object.rect_estimated               = rect;
                    object.timestamp                          = object.last_body_identify_timestamp = timestamp;
                    object.surf_object_keypoints_last_match   = NULL;
                    object.surf_object_descriptors_last_match = NULL;

                    g_array_append_val(filter->stored_objects, object);
                }
            } // new
        }
    }

    // Put the objects found in the frame in gstreamer pad
    if ((filter->stored_objects != NULL) && (filter->stored_objects->len > 0)) {
        guint i;

        for (i = 0; i < filter->stored_objects->len; ++i) {
            InstanceObject object = g_array_index(filter->stored_objects, InstanceObject, i);

            // Skip this object if it was not found in this frame
            if (object.timestamp == timestamp) {
                TrackedObject *tracked_object;
                GstEvent      *event;
                CvRect         rect;

                rect = ((object.last_body_identify_timestamp == timestamp) ? object.rect : object.rect_estimated);

                if (filter->verbose) {
                    GST_INFO("[object #%d rect] x: %d, y: %d, width: %d, height: %d\n", object.id, rect.x, rect.y, rect.width, rect.height);
                    // drawSurfPoints(object.surf_object_keypoints, cvPoint(object.rect.x, object.rect.y), filter->image, PRINT_COLOR, 0);
                    // drawSurfPoints(object.surf_object_keypoints_last_match, cvPoint(object.rect.x, object.rect.y), filter->image, PRINT_COLOR, 1);
                }

                if (filter->display_features) {
                    drawSurfPoints(object.surf_object_keypoints_last_match, cvPoint(0, 0), filter->image, PRINT_COLOR, 1);
                }

                if (filter->display) {
                    char *label;
                    float font_scaling;

                    font_scaling = ((filter->image->width * filter->image->height) > (320 * 240)) ? 0.5f : 0.3f;

                    cvRectangle(filter->image, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height),
                                PRINT_COLOR, ((object.last_body_identify_timestamp == timestamp) ? 2 : 1), 8, 0);
                    label = g_strdup_printf("OBJ#%i", object.id);
                    printText(filter->image, cvPoint(rect.x + (rect.width / 2), rect.y + (rect.height / 2)), label, PRINT_COLOR, font_scaling, 1);
                    g_free(label);
                }

                // allocate and initialize 'TrackedObject' structure
                tracked_object = tracked_object_new();
                tracked_object->id        = g_strdup_printf("PERSON#%d", object.id);
                tracked_object->type      = TRACKED_OBJECT_DYNAMIC;
                tracked_object->height    = rect.height;
                tracked_object->timestamp = timestamp;

                // add the points that define the lower part of the object (i.e.,
                // the lower horizontal segment of the rectangle) as the object's perimeter
                tracked_object_add_point(tracked_object, rect.x, rect.y + rect.height);
                tracked_object_add_point(tracked_object, rect.x + rect.width, rect.y + rect.height);

                // send downstream event
                event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM,
                                             tracked_object_to_structure(tracked_object, "tracked-object"));
                gst_pad_push_event(filter->srcpad, event);
            }
        }
    }

    // Clean body rects
    g_array_free(filter->rect_array, TRUE);
    filter->rect_array = g_array_sized_new(FALSE, FALSE, sizeof(CvRect), 1);

    // Draw number of objects stored
    if (filter->display) {
        char *label = g_strdup_printf("N_STORED_OBJS: %3i", filter->stored_objects->len);
        printText(filter->image, cvPoint(0, 0), label, PRINT_COLOR, .5, 1);
        g_free(label);
    }

    gst_buffer_set_data(buf, (guint8*) filter->image->imageData, (guint) filter->image->imageSize);
    return gst_pad_push(filter->srcpad, buf);
}
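
The per-rect matching logic above hinges on one compound test: the centroid of the object's matched SURF features must fall inside the candidate haar rect, and the two rects must have a similar area. Pulled out of the element, that test reads roughly as follows (a sketch; the element itself performs it inline via pointIntoRect() and surfCentroid()):

#include <opencv/cv.h>

// Sketch of the "same object" test. min_area_ratio corresponds to the
// element's PERC_RECT_TO_SAME_OBJECT constant.
static int same_object(CvRect known, CvRect candidate, CvPoint feature_centroid,
                       float min_area_ratio)
{
	int area_known = known.width * known.height;
	int area_cand  = candidate.width * candidate.height;
	float ratio    = (float) MIN(area_known, area_cand) /
	                 (float) MAX(area_known, area_cand);

	// Centroid of the matched features must lie inside the candidate rect...
	int inside = feature_centroid.x >= candidate.x &&
	             feature_centroid.x <  candidate.x + candidate.width &&
	             feature_centroid.y >= candidate.y &&
	             feature_centroid.y <  candidate.y + candidate.height;

	// ...and the rect areas must not deviate too much.
	return inside && ratio >= min_area_ratio;
}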
Example #19
//
// Transform
// Transform the sample 'in place'
//
HRESULT CKalmTrack::Transform(IMediaSample *pSample)
{
    BYTE*   pData;
    CvImage image;
    
    pSample->GetPointer(&pData);
    
    AM_MEDIA_TYPE* pType = &m_pInput->CurrentMediaType();
    VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pType->pbFormat;
    
    // Get the image properties from the BITMAPINFOHEADER
    CvSize size = cvSize( pvi->bmiHeader.biWidth, pvi->bmiHeader.biHeight );
    int stride = (size.width * 3 + 3) & -4;

    cvInitImageHeader( &image, size, IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 );
    cvSetImageData( &image, pData,stride );

    if(IsTracking == false)
    {
        if(IsInit == false)
        {
            CvPoint p1, p2;
            // Draw box
            p1.x = cvRound( size.width * m_params.x );
            p1.y = cvRound( size.height * m_params.y );

            p2.x = cvRound( size.width * (m_params.x + m_params.width));
            p2.y = cvRound( size.height * (m_params.y + m_params.height));

            CheckBackProject( &image );

            cvRectangle( &image, p1, p2, -1, 1 );
        }
        else
        {
            m_object.x = cvRound( size.width * m_params.x );
            m_object.y = cvRound( size.height * m_params.y );
            m_object.width = cvRound( size.width * m_params.width );
            m_object.height = cvRound( size.height * m_params.height );
            ApplyCamShift( &image, true );

            CheckBackProject( &image );

            IsTracking = true;
        }
    }
    else
    {
        cvKalmanUpdateByTime(Kalman);
        m_object.x = cvRound( Kalman->PriorState[0]-m_object.width*0.5);
        m_object.y = cvRound( Kalman->PriorState[2]-m_object.height*0.5 );
        
        ApplyCamShift( &image, false );

        CheckBackProject( &image );

        cvRectangle( &image,
                     cvPoint( m_object.x, m_object.y ),
                     cvPoint( m_object.x + m_object.width, m_object.y + m_object.height ),
                     -1, 1 );

        Rectang(&image,m_Indicat1,-1);
        m_X.x = 10;
        m_X.y = 10;
        m_X.width=50*m_Old.x/size.width;
        m_X.height =10;
        Rectang(&image,m_X,CV_RGB(0,0,255));
        m_Y.x = 10;
        m_Y.y = 10;
        m_Y.width=10;
        m_Y.height = 50*m_Old.y/size.height;
        Rectang(&image,m_Y,CV_RGB(255,0,0));
        m_Indicat2.x = 0; 
        m_Indicat2.y = size.height-50;
        m_Indicat2.width = 50;
        m_Indicat2.height = 50;
        Rectang(&image,m_Indicat2,-1);
        float Norm = cvSqrt(Measurement[1]*Measurement[1]+Measurement[3]*Measurement[3]);
        int VXNorm = (fabs(Measurement[1])>5)?(int)(12*Measurement[1]/Norm):0;
        int VYNorm = (fabs(Measurement[3])>5)?(int)(12*Measurement[3]/Norm):0;
        CvPoint pp1 = {25,size.height-25};
        CvPoint pp2 = {25+VXNorm,size.height-25+VYNorm};
        cvLine(&image,pp1,pp2,CV_RGB(0,0,0),3);
        /*CvPoint pp1 = {25,size.height-25};
        double angle = atan2( Measurement[3], Measurement[1] );
        CvPoint pp2 = {cvRound(25+12*cos(angle)),cvRound(size.height-25-12*sin(angle))};
        cvLine(&image,pp1,pp2,0,3);*/
    }

    cvSetImageData( &image, 0, 0 );

    return NOERROR;
} // Transform
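
cvKalmanUpdateByTime() and the PriorState member used above come from the legacy pre-1.0 OpenCV Kalman interface. For reference, the same predict-then-correct cycle in the later OpenCV 1.x C API looks like this (a sketch; the (x, vx, y, vy) state layout and the omitted transition/noise matrix setup are assumptions):

#include <opencv/cv.h>

void kalman_cycle_demo(void)
{
	// 4 state variables (x, vx, y, vy), 2 measurements (x, y), no control input.
	CvKalman* kalman = cvCreateKalman(4, 2, 0);
	// Real code must also fill kalman->transition_matrix and the
	// process/measurement noise covariances; omitted here for brevity.

	float meas_data[2] = { 0.f, 0.f };
	CvMat measurement  = cvMat(2, 1, CV_32FC1, meas_data);

	// Predict: the returned matrix is the a-priori state estimate,
	// the analogue of PriorState in the legacy API.
	const CvMat* prediction = cvKalmanPredict(kalman, NULL);
	float pred_x = prediction->data.fl[0];
	float pred_y = prediction->data.fl[2];
	(void) pred_x; (void) pred_y;

	// Correct: fold the new observation into the state.
	cvKalmanCorrect(kalman, &measurement);

	cvReleaseKalman(&kalman);
}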
Example #20
/* chain function
 * this function does the actual processing
 */
static GstFlowReturn
gst_template_match_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstTemplateMatch *filter;
  CvPoint best_pos;
  double best_res;
  GstMapInfo info;
  GstMessage *m = NULL;

  filter = GST_TEMPLATE_MATCH (parent);

  if ((!filter) || (!buf)) {
    return GST_FLOW_OK;
  }
  GST_LOG_OBJECT (filter, "Buffer size %u", (guint) gst_buffer_get_size (buf));

  buf = gst_buffer_make_writable (buf);
  gst_buffer_map (buf, &info, GST_MAP_READWRITE);
  filter->cvImage->imageData = (char *) info.data;

  GST_OBJECT_LOCK (filter);
  if (filter->cvTemplateImage && !filter->cvDistImage) {
    if (filter->cvTemplateImage->width > filter->cvImage->width) {
      GST_WARNING ("Template Image is wider than input image");
    } else if (filter->cvTemplateImage->height > filter->cvImage->height) {
      GST_WARNING ("Template Image is taller than input image");
    } else {

      GST_DEBUG_OBJECT (filter, "cvCreateImage (Size(%d-%d+1,%d) %d, %d)",
          filter->cvImage->width, filter->cvTemplateImage->width,
          filter->cvImage->height - filter->cvTemplateImage->height + 1,
          IPL_DEPTH_32F, 1);
      filter->cvDistImage =
          cvCreateImage (cvSize (filter->cvImage->width -
              filter->cvTemplateImage->width + 1,
              filter->cvImage->height - filter->cvTemplateImage->height + 1),
          IPL_DEPTH_32F, 1);
      if (!filter->cvDistImage) {
        GST_WARNING ("Couldn't create dist image.");
      }
    }
  }
  if (filter->cvTemplateImage && filter->cvDistImage) {
    GstStructure *s;

    gst_template_match_match (filter->cvImage, filter->cvTemplateImage,
        filter->cvDistImage, &best_res, &best_pos, filter->method);

    s = gst_structure_new ("template_match",
        "x", G_TYPE_UINT, best_pos.x,
        "y", G_TYPE_UINT, best_pos.y,
        "width", G_TYPE_UINT, filter->cvTemplateImage->width,
        "height", G_TYPE_UINT, filter->cvTemplateImage->height,
        "result", G_TYPE_DOUBLE, best_res, NULL);

    m = gst_message_new_element (GST_OBJECT (filter), s);

    if (filter->display) {
      CvPoint corner = best_pos;
      CvScalar color;
      if (filter->method == CV_TM_SQDIFF_NORMED
          || filter->method == CV_TM_CCORR_NORMED
          || filter->method == CV_TM_CCOEFF_NORMED) {
        /* Yellow growing redder as match certainty approaches 1.0.  This can
           only be applied with method == *_NORMED as the other match methods
           aren't normalized to be in range 0.0 - 1.0 */
        color = CV_RGB (255, 255 - pow (255, best_res), 32);
      } else {
        color = CV_RGB (255, 32, 32);
      }

      buf = gst_buffer_make_writable (buf);

      corner.x += filter->cvTemplateImage->width;
      corner.y += filter->cvTemplateImage->height;
      cvRectangle (filter->cvImage, best_pos, corner, color, 3, 8, 0);
    }

  }
  GST_OBJECT_UNLOCK (filter);

  if (m) {
    gst_element_post_message (GST_ELEMENT (filter), m);
  }
  return gst_pad_push (filter->srcpad, buf);
}
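
gst_template_match_match() is defined elsewhere in the plugin, but the core of any such matcher is a cvMatchTemplate() pass followed by cvMinMaxLoc(). A standalone sketch of that core (an illustration of the technique, not the plugin's own code):

#include <opencv/cv.h>

static void match_template(IplImage* image, IplImage* templ, IplImage* dist,
                           double* best_res, CvPoint* best_pos, int method)
{
	double min_val, max_val;
	CvPoint min_loc, max_loc;

	// dist must be a 32F single-channel image of size
	// (image->width - templ->width + 1, image->height - templ->height + 1),
	// exactly as allocated in the chain function above.
	cvMatchTemplate(image, templ, dist, method);
	cvMinMaxLoc(dist, &min_val, &max_val, &min_loc, &max_loc, NULL);

	// For the SQDIFF methods the best match is the minimum of the
	// distance map; for all other methods it is the maximum.
	if (method == CV_TM_SQDIFF || method == CV_TM_SQDIFF_NORMED) {
		*best_res = min_val;
		*best_pos = min_loc;
	} else {
		*best_res = max_val;
		*best_pos = max_loc;
	}
}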
Example #21
void VisuoThread::updatePFTracker()
{
    Vector *trackVec=pftInPort.read(false);

    Vector stereo;
    if(trackVec!=NULL && trackVec->size()==12)
    {
        //must check if the tracker has gone mad.
        if(checkTracker(trackVec))
        {
            trackMutex.wait();
            stereoTracker.vec=*trackVec;
            trackMutex.post();

            stereo.resize(4);
            stereo[0]=stereoTracker.vec[0];
            stereo[1]=stereoTracker.vec[1];
            stereo[2]=stereoTracker.vec[6];
            stereo[3]=stereoTracker.vec[7];

            if(trackMode==MODE_TRACK_TEMPLATE)
                stereo_target.set(stereo);
        }
        else
        {
            trackMutex.wait();
            stereoTracker.vec.clear();
            stereoTracker.side=0;
            trackMutex.post();
        }
    }



    imgMutex.wait();
    if(img[LEFT]!=NULL && img[RIGHT]!=NULL)
    {
        Image drawImg[2];
        drawImg[LEFT]=*img[LEFT];
        drawImg[RIGHT]=*img[RIGHT];

        if(stereoTracker.vec.size()==12)
        {

            cvCircle(drawImg[LEFT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[0]),cvRound(stereoTracker.vec[1])),3,cvScalar(0,255),3);
            cvRectangle(drawImg[LEFT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[2]),cvRound(stereoTracker.vec[3])),
                                                 cvPoint(cvRound(stereoTracker.vec[4]),cvRound(stereoTracker.vec[5])),cvScalar(0,255),3);

            cvCircle(drawImg[RIGHT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[6]),cvRound(stereoTracker.vec[7])),3,cvScalar(0,255),3);
            cvRectangle(drawImg[RIGHT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[8]),cvRound(stereoTracker.vec[9])),
                                                 cvPoint(cvRound(stereoTracker.vec[10]),cvRound(stereoTracker.vec[11])),cvScalar(0,255),3);

            Bottle v;
            v.clear();
            Bottle &vl=v.addList();
            vl.addInt(cvRound(stereoTracker.vec[0]));
            vl.addInt(cvRound(stereoTracker.vec[1]));
            vl.addInt(stereoTracker.side);
            Bottle &vr=v.addList();
            vr.addInt(cvRound(stereoTracker.vec[6]));
            vr.addInt(cvRound(stereoTracker.vec[7]));
            vr.addInt(stereoTracker.side);

            boundMILPort.write(v);
        }


        if(newImage[LEFT])
            outPort[LEFT].write(drawImg[LEFT]);
        
        if(newImage[RIGHT])
            outPort[RIGHT].write(drawImg[RIGHT]);

        //avoid writing the same image multiple times
        newImage[LEFT]=false;
        newImage[RIGHT]=false;
    }
    imgMutex.post();
}
Example #22
int main( int argc, char** argv )
{
    const char *pstrWindowsSrcTitle = "Original picture";
    const char *pstrWindowsOutLineTitle = "Outline picture";

    const int IMAGE_WIDTH = 400;
    const int IMAGE_HEIGHT = 200;

    // Create the source image
    IplImage *pSrcImage = cvCreateImage(cvSize(IMAGE_WIDTH, IMAGE_HEIGHT), IPL_DEPTH_8U, 3);

    // Fill it with white
    cvRectangle(pSrcImage, cvPoint(0, 0), cvPoint(pSrcImage->width, pSrcImage->height), CV_RGB(255, 255, 255), CV_FILLED);

    // Draw the circles
    CvPoint ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    int nRadius = 80;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 0), CV_FILLED);
    ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    nRadius = 30;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 255), CV_FILLED);

    // Draw the rectangles
    CvPoint ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 20, 20);
    CvPoint ptRightBottom = cvPoint(IMAGE_WIDTH - 20, IMAGE_HEIGHT - 20);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(0, 255, 255), CV_FILLED);
    ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 60, 40);
    ptRightBottom = cvPoint(IMAGE_WIDTH - 60, IMAGE_HEIGHT - 40);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(255, 255, 255), CV_FILLED);

    // Show the original image
    cvNamedWindow(pstrWindowsSrcTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsSrcTitle, pSrcImage);

    // Convert to grayscale
    IplImage *pGrayImage =  cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 1);
    cvCvtColor(pSrcImage, pGrayImage, CV_BGR2GRAY);

    // Convert to a binary image
    IplImage *pBinaryImage = cvCreateImage(cvGetSize(pGrayImage), IPL_DEPTH_8U, 1);
    cvThreshold(pGrayImage, pBinaryImage, 250, 255, CV_THRESH_BINARY);


    // Find the contours and count how many were detected
    CvMemStorage *pcvMStorage = cvCreateMemStorage();
    CvSeq *pcvSeq = NULL;
    cvFindContours(pBinaryImage, pcvMStorage, &pcvSeq, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    // Draw the contour image
    IplImage *pOutlineImage = cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 3);
    int nLevels = 5;

    // Fill it with white
    cvRectangle(pOutlineImage, cvPoint(0, 0), cvPoint(pOutlineImage->width, pOutlineImage->height), CV_RGB(255, 255, 255), CV_FILLED);
    cvDrawContours(pOutlineImage, pcvSeq, CV_RGB(255,0,0), CV_RGB(0,255,0), nLevels, 2);

    // Show the contour image
    cvNamedWindow(pstrWindowsOutLineTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsOutLineTitle, pOutlineImage);

    cvWaitKey(0);

    cvReleaseMemStorage(&pcvMStorage);

    cvDestroyWindow(pstrWindowsSrcTitle);
    cvDestroyWindow(pstrWindowsOutLineTitle);
    cvReleaseImage(&pSrcImage);
    cvReleaseImage(&pGrayImage);
    cvReleaseImage(&pBinaryImage);
    cvReleaseImage(&pOutlineImage);

    return 0;
}
Example #23
 int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int moddiv=2,seq=0,seqdiv=2;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  unsigned char sendBuf;
   CvCapture* capture = cvCaptureFromCAM( -1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
      
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)

      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(100, 50, 50),cvScalar(150, 110, 110),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
    if (seq==0) {
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); // cvDilate(src, dst, element, iterations)

   //  tempwidth=cvGetSize(dilframebot).width;
   //  tempheight=cvGetSize(dilframebot).height;
   //  printf("dilframe: %d, %d \n",tempwidth,tempheight);
     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobResult(input frame, mask, threshold): extract all white blobs from the image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50); // Filter(input, cond, criteria, cond, const): drop all blobs whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); // GetNthBlob(criteria, number, output): keep only the largest blob by area
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////////
    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); // cvDilate(src, dst, element, iterations)
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobResult(input frame, mask, threshold): extract all white blobs from the image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50); // Filter(input, cond, criteria, cond, const): drop all blobs whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); // GetNthBlob(criteria, number, output): keep only the largest blob by area
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////
   if(seq==seqdiv+2) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
     }
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("%d, %d, %d, %d\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     }
		
}
    seq++;
    seq=seq%(seqdiv+4);
     cvShowImage( "mywindow", frame); // show output image
//     cvShowImage( "bot", threshframebot);
//     cvShowImage( "top", threshframetop);
/*   cvReleaseImage(&frame);
   cvReleaseImage(&threshframe);
   cvReleaseImage(&hsvframe);
   cvReleaseImage(&threshframebot);
   cvReleaseImage(&modframe);
   cvReleaseImage(&dilframetop);
   cvReleaseImage(&dilframebot);*/
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27  ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   //v4l.flush();
   cvDestroyWindow( "mywindow" );
   
return 0;
 }
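
One caveat on the loop above: cvCreateImage() and cvCloneImage() run on every iteration while the matching cvReleaseImage() calls are commented out, so the program leaks several images per frame (threshframebot and threshframetop are even cloned twice on the iterations that re-clone them inside the seq blocks). A sketch of the balancing releases (frame itself is owned by the capture and must not be released):

	// inside the if (seq == 0) block, after blob extraction:
	cvReleaseImage(&dilframebot);

	// inside the if (seq == seqdiv) block, after blob extraction:
	cvReleaseImage(&dilframetop);

	// at the end of every loop iteration:
	cvReleaseImage(&modframe);
	cvReleaseImage(&hsvframe);
	cvReleaseImage(&threshframe);
	cvReleaseImage(&threshframebot);
	cvReleaseImage(&threshframetop);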
Example #24
int main( int argc, char** argv ) {
	
	int		cmd;
	float	desired[8][8]	= { {1,0,0,0,0,0,0,0},	// 0
								{0,1,0,0,0,0,0,0},	// 1
								{0,0,1,0,0,0,0,0},	// 2
								{0,0,0,1,1,0,0,0},	// 3 forward
								{0,0,0,1,1,0,0,0},	// 4 forward
								{0,0,0,0,0,1,0,0},	// 5
								{0,0,0,0,0,0,1,0},	// 6
								{0,0,0,0,0,0,0,1}};	// 7
						 
	CvPoint		p,q;
	CvScalar	line_color		= CV_RGB(0,0,255);
	CvScalar	out_color;

	const char* name_orig		= "Original: press q to save images";
	const char* name_ave		= "input";
	const char* name_weights	= "weights";
	
	const char*	inputCmdFile_name	= "./inputs/dataset1/commandlist.dat";
	const char*	outputFile_name		= "./outputs/output_";
	FILE*		outputFile;
	FILE*		inputCmdFile;
	char		inputName[64];
	char		outputName[64];

	
	CvCapture* capture = cvCreateCameraCapture(0) ;
		
	CvSize frame_size;
	CvScalar ave = cvScalar(1);
	
	
	CvRect  slice_rect;
	CvSize	slice_size;

	static	IplImage*	frame				= NULL;
	static	IplImage*	frame_g				= NULL;
	static	IplImage*	frame_small			= NULL;
	static	IplImage*	frame_weights		= NULL;
	static	IplImage*	frame_w_big			= NULL;
	static	IplImage*	frame_w_final		= NULL;
	static	IplImage*	frame_final			= NULL;	
	
	static	IplImage*	ave_image			= NULL;
//	static	IplImage *scale					= NULL;
	
	static	IplImage*	frame_slices[N_SLICES];



	float	inputs[(SIZE/N_SLICES)*SIZE];
	float	outputs[N_SLICES];
	int		choices[N_SLICES];
//	float	desired[N_SLICES];
//	float	desired[] = {0,0,0,1,1,0,0,0};										//XXX dummy test...	

	//Evo (int nNets, int nInputs, int nHidden, int nOuts)
	Evo*	evoSlice;

	int		ep;
	int		trial;
	int		stepCnt;

	int		flag = 0;

	char	c;
	int		i,j,k,s;
	float	tmp;

////////////////////////////////////////////////////////////////////////////////
// init stuff

	
	inputCmdFile	= fopen(inputCmdFile_name,"r");
	if (inputCmdFile == NULL) {printf("Unable to open: %s",inputCmdFile_name); return 0; }
	
	// create windows for looking at stuff
//	cvNamedWindow( name_slice,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_weights,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_ave,		CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_orig,		CV_WINDOW_AUTOSIZE );
	

//	frame_size	= cvSize(frame->width,frame->height);
	frame_size	= cvSize(SIZE,SIZE);

#ifdef USECAM
	// capture a frame so we can get an idea of the size of the source
	frame = cvQueryFrame( capture );
	if( !frame ) return 0;
#else
	sprintf(inputName,"./inputs/dataset1/image0000000000.jpg");
	frame = cvLoadImage(inputName, 0 );
	if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
#endif


	allocateOnDemand( &frame_g,			cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_w_big,		cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );	
	allocateOnDemand( &frame_w_final,	cvSize(frame->width,frame->height), IPL_DEPTH_8U, 3 );	
	allocateOnDemand( &frame_final,		cvSize(frame->width,frame->height+20), IPL_DEPTH_8U, 3 );	
	
	
	allocateOnDemand( &ave_image,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_small,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_weights,	frame_size, IPL_DEPTH_8U, 1 );


	slice_size = cvSize(ave_image->width/N_SLICES, ave_image->height);


	for (i=0;i<N_SLICES;i++) {
		allocateOnDemand( &frame_slices[i], slice_size, IPL_DEPTH_8U, 1);
	}


	for(trial=0;trial<N_TRIALS;trial++) {


		sprintf(outputName,"%s%d.txt", outputFile_name, trial);
		outputFile		= fopen(outputName,"w");


		// init each learner
		evoSlice = (Evo*)malloc(sizeof(Evo)*N_SLICES);
		for(i=0;i<N_SLICES;i++) {
			evoSlice[i] = Evo(N_NETS, (SIZE/N_SLICES)*SIZE, N_HIDDEN, 1);
			evoSlice[i].choose();
			choices[i] = evoSlice[i].choose();
		}

		ep		= 0;
		stepCnt = 0;
		flag	= 0;


		while(1) {

	////////////////////////////////////////////////////////////////////////////////
	// Pre processing		

#if 0
			// make blank image...
			cvSet(ave_image, cvScalar(0));

	
			for (i=0;i<NF;i++) {
	
				// get image
#ifdef USECAM
				frame = cvQueryFrame( capture );
				if( !frame ) break;
#else				
				sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
				frame = cvLoadImage(inputName, 0 );
				if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
				stepCnt++;
#endif			
				// convert it to grey
				cvConvertImage(frame, frame_g );//, CV_CVTIMG_FLIP);


				// resize
				cvResize(frame_g, frame_small);

				
				// take difference
				cvSub(frame_small, ave_image, ave_image);

			}

			for(j=0;j<SIZE;j++) {
				for(k=0;k<SIZE;k++) {
					PIX(ave_image,k,j) = (char)(PIX(ave_image,k,j)*10);
				}
			}

#endif

#if 0
			frame = cvQueryFrame( capture );
			if( !frame ) break;
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
#endif
		
#if 1
			sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
			frame = cvLoadImage(inputName, 0 );
			if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); break;}
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
			
//			cvCanny(ave_image, ave_image, 50, 40,5);
			
#endif
		
		
	//		cvDilate(ave_image, ave_image,NULL,4);


	////////////////////////////////////////////////////////////////////////////////
	// Generate NN inputs


			// slice it up
			for (i=0;i<N_SLICES;i++) {
	
				slice_rect = cvRect(i*ave_image->width/N_SLICES, 0, ave_image->width/N_SLICES, ave_image->height);

				cvSetImageROI(ave_image, slice_rect);
				
				cvCopy(ave_image, frame_slices[i], NULL);

			}

			cvResetImageROI(ave_image);  // remove this when we don't care about looking at the ave

	////////////////////////////////////////////////////////////////////////////////
	// Evaluate NN
			if (stepCnt == N_LEARN)
				flag = 1;
	
			if( (flag == 1) && (stepCnt%N_LEARN == 0)) {	// every N_LEARN images switch
		
				ep++;
				fprintf(outputFile,"%d",ep);

				for(i=0;i<N_SLICES;i++) {
					evoSlice[i].replace();
					choices[i] = evoSlice[i].choose();
				
					fprintf(outputFile,"\t%1.3f",evoSlice[i].netPool[evoSlice[i].best()].grade);
				
				}
			
				fprintf(outputFile,"\n");
			
				if(ep >= N_EPISODES) break;
			
				// draw weights image
				for(s=0;s<N_SLICES;s++) {
		
					for(j=0;j<SIZE;j++) {
						for(k=0;k<(SIZE/N_SLICES);k++) {
				
							tmp = 0;
							for(i=0;i<N_HIDDEN;i++) {
								tmp += evoSlice[s].mutant->nodeHidden->w[(j*(SIZE/N_SLICES))+k+1];
							}
					
							PIX(frame_weights,k+(s*SIZE/N_SLICES),j) = (char)((tmp/N_HIDDEN)*255 + 127);
		//					printf("%d\t",(char)((tmp/N_HIDDEN)*255));
						}
					}
				}

				cvResize(frame_weights, frame_w_big, CV_INTER_LINEAR);
				cvConvertImage(frame_w_big, frame_w_final);
			
			}


			fscanf(inputCmdFile,"%d",&cmd);

			printf("\nTrial: %d   Episode: %d   Devin's cmd: %d\n",trial,ep,cmd);
			for(i=0;i<N_SLICES;i++)
				printf("%1.3f\t",desired[cmd][i]);
			printf("\n");
		

			for(i=0;i<N_SLICES;i++) {
	//			cvShowImage( name_slice, frame_slices[i] );

				// strip pixel data into a single array
				for(j=0;j<SIZE;j++) {
					for(k=0;k<(SIZE/N_SLICES);k++) {
						inputs[(j*(SIZE/N_SLICES))+k]	= (float)PIX(frame_slices[i],k,j)/255.0;
					}
				}


	//			printf("\n%d: Eval slice %d\n",stepCnt,i);
				outputs[i] = evoSlice[i].eval(inputs, &desired[cmd][i]);
	//			outputs[i] = desired[i];
				printf("%1.3f\t",outputs[i]);

			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%d\t",choices[i]);
			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%1.3f\t",evoSlice[i].mutant->grade);
			}
			printf("\n");


		
	////////////////////////////////////////////////////////////////////////////////
	// GUI stuff

		
		
		
			// copy input image into larger final image
			cvSetImageROI(frame_final, cvRect(0, 0, frame_w_big->width, frame_w_big->height));
			cvConvertImage(frame, frame_final);
			cvResetImageROI(frame_final);

			// draw slice markers
			for(i=1;i<N_SLICES;i++) {
				// on the final frame...
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_final->height;
				cvLine( frame_final, p, q, line_color, 2, CV_AA, 0 );

				// on the weights
				p.x = (int)(i*frame_w_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_w_final->height;
				cvLine( frame_w_final, p, q, line_color, 2, CV_AA, 0 );
			}

			// draw output indicators
			for(i=0;i<N_SLICES;i++) {
				out_color = CV_RGB(outputs[i]*255,0,0);
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = (int)(frame_final->height-20);
				q.x = (int)(p.x+frame_final->width/N_SLICES);
				q.y = (int)(p.y+20);
				cvRectangle( frame_final, p, q, out_color, CV_FILLED, CV_AA, 0 );
			}
		
		
			cvShowImage( name_ave,		ave_image );
			cvShowImage( name_orig,		frame_final );
			cvShowImage( name_weights,	frame_w_final );
		
			c = cvWaitKey(2);
		
			if( c == 27 ) break;
			else if( c == 'q') {
				cvSaveImage("weights.jpg",frame_w_final);
				cvSaveImage("output.jpg",frame_final);
			}

			stepCnt++;
			if (stepCnt>=(N_STEPS-(N_STEPS%N_LEARN))) {
				stepCnt=0;
				rewind(inputCmdFile);
			}


		} // end while

		free(evoSlice);
		fclose(outputFile);
	} // end trial for

////////////////////////////////////////////////////////////////////////////////
// clean up
//	delete &evo;

	fclose(inputCmdFile);
	
	cvReleaseCapture(	&capture );
	cvDestroyWindow(	name_ave );
	cvDestroyWindow(	name_orig );
	cvDestroyWindow(	name_weights );
}
Example #25
void detect_and_draw_objects(GF_InputSensorDevice *ifce, IplImage* image,
                              CvHaarClassifierCascade* cascade,
                              int do_pyramids )
{
    IplImage* small_image = image;
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* faces;
    int i, scale = 1;
    //CvRect* theRealFace;
    int theRealX=0, theRealY=0, theRealHeight=0 , theRealWidth=0;

    int tmpMaxSurface=0;


    if( do_pyramids )
    {
        small_image = cvCreateImage( cvSize(image->width/2,image->height/2), IPL_DEPTH_8U, 3 );
        cvPyrDown( image, small_image, CV_GAUSSIAN_5x5 );
        scale = 2;
    }

    faces = cvHaarDetectObjects( small_image, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0) );

    for( i = 0; i < faces->total; i++ )
    {
        
        CvRect face_rect = *(CvRect*)cvGetSeqElem( faces, i );
       /* cvRectangle( image, cvPoint(face_rect.x*scale,face_rect.y*scale),
                     cvPoint((face_rect.x+face_rect.width)*scale,
                             (face_rect.y+face_rect.height)*scale),
                     CV_RGB(0,255,0), 3 );*/
    if(face_rect.width*face_rect.height>tmpMaxSurface){
        theRealX=face_rect.x;
        theRealY=face_rect.y;
        theRealHeight=face_rect.height;
        theRealWidth=face_rect.width;
        tmpMaxSurface=face_rect.width*face_rect.height;
        }

    }
    cvRectangle( image, cvPoint(theRealX*scale,theRealY*scale),
                     cvPoint((theRealX+theRealWidth)*scale,
                             (theRealY+theRealHeight)*scale),
                     CV_RGB(0,255,0), 3, 8, 0 );

	fprintf(stdout, "X translation: %d - Y translation: %d\n", (theRealX - prev_x0), (theRealY - prev_y0) );

	/*send data frame to GPAC*/
	{
		char *buf;
		u32 buf_size;
		GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_write_int(bs, 1, 1); 
		gf_bs_write_float(bs, (Float) (theRealX - 640/2) );
		gf_bs_write_float(bs, (Float) (480/2 - theRealY) );

		gf_bs_align(bs);
		gf_bs_get_content(bs, &buf, &buf_size);
		gf_bs_del(bs);
		ifce->DispatchFrame(ifce, buf, buf_size);
		gf_free(buf);
	}


	prev_x0=theRealX;
	prev_y0=theRealY;
	
	if( small_image != image )
        cvReleaseImage( &small_image );
    
	cvReleaseMemStorage( &storage );
}
unsigned long Test()
{
	bool visualize = true;
	std::string RangeWindowName = "Range";
	std::string HoughWindowName = "Hough";

	IplImage* visualizationReferenceImage = cvLoadImage("Pictures/building.jpg");
	CvMemStorage* storage = cvCreateMemStorage(0);	/// Line endings storage
	CvSeq* lines = 0;
	int AngleBins = 45;	/// Controls angle resolution of Hough trafo (bins per pi)
	IplImage* Image = cvCreateImage(cvGetSize(visualizationReferenceImage), IPL_DEPTH_8U, 3);	/// Visualization image
	cvCopyImage(visualizationReferenceImage,Image);
	IplImage* GrayImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);
	cvCvtColor(Image, GrayImage, CV_RGB2GRAY);
	IplImage* CannyImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);	/// Edge image
	cvCanny(GrayImage, CannyImage, 25, 50);
	CvPoint ROIp1 = cvPoint(100,10);		/// Tablet ROI
	CvPoint ROIp2 = cvPoint(visualizationReferenceImage->width-40+ROIp1.x,visualizationReferenceImage->height-200+ROIp1.y);
	cvSetImageROI(CannyImage,cvRect(ROIp1.x,ROIp1.y,ROIp2.x-ROIp1.x,ROIp2.y-ROIp1.y));
	cvRectangle(Image, ROIp1, ROIp2, CV_RGB(0,255,0));
	//int maxd = cvRound(sqrt(sqrt((double)ROIp2.x-ROIp1.x)+sqrt((double)ROIp2.y-ROIp1.y)));	/// Maximum of possible distance value in Hough space
	int maxd = cvRound(sqrt((double)(((ROIp2.x-ROIp1.x)*(ROIp2.x-ROIp1.x))+((ROIp2.y-ROIp1.y)*(ROIp2.y-ROIp1.y)))));	/// Maximum of possible distance value in Hough space
	IplImage* HoughSpace = cvCreateImage(cvSize(maxd,AngleBins+1),IPL_DEPTH_8U, 1);		/// Hough space image (black=no line, white=there are lines at these bins)
	cvZero(HoughSpace);
	
	/// Hough transformation
	int AccumulatorThreshold = 100;		/// A line is returned only if its accumulator value exceeds this threshold.
	double MinimumLineLength = 50;		/// For the probabilistic Hough transform, the minimum line length.
	double MaximumGap = 4;				/// For the probabilistic Hough transform, the maximum gap between segments lying on the same line for them to be joined into one.
	lines = cvHoughLines2(CannyImage, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/AngleBins, AccumulatorThreshold, MinimumLineLength, MaximumGap);
	
	for(int i = 0; i < lines->total; i++ )
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		/// Endings of a line
		CvPoint p0 = cvPoint(line[0].x+ROIp1.x,line[0].y+ROIp1.y);
		CvPoint p1 = cvPoint(line[1].x+ROIp1.x,line[1].y+ROIp1.y);
		cvLine(Image, p0, p1, CV_RGB(255,0,0), 3, 8 );
		
		/// Slope/angle of line
		double phi = CV_PI/2;
		if(p0.x != p1.x) phi = atan((double)(p1.y-p0.y)/(double)(p1.x-p0.x));
		phi += (phi < 0)*CV_PI;

		/// Hessian normal form parameters: d = x*cos(alpha) + y*sin(alpha)
		/// with alpha in [0...pi], d in [0...maxd]
		double alpha = phi+CV_PI/2;
		alpha -= (alpha > CV_PI)*CV_PI;

		double d = p0.x;
		if(p0.x != p1.x)
		{
			double n = p1.y - (p1.y-p0.y)/(p1.x-p0.x) * p1.x;
			d = abs(n * cos(phi));
		}

		/// Write Line into Hough space
		cvLine(HoughSpace, cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),CV_RGB(255,255,255));
	}
	if(visualize)
	{
		cvNamedWindow(RangeWindowName.c_str());
		cvNamedWindow(HoughWindowName.c_str());
		cvShowImage(RangeWindowName.c_str(), Image);
		cvShowImage(HoughWindowName.c_str(), HoughSpace);
		cvWaitKey(0);
	}
	cvCopyImage(Image,visualizationReferenceImage);

	cvReleaseImage(&GrayImage);
	cvReleaseImage(&CannyImage);

	/*
	IplImage* img1 = cvLoadImage("Cob3.jpg");
	IplImage* img2 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img3 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img4 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	cvNamedWindow("Img1");
	cvNamedWindow("Img2");
	cvNamedWindow("Img3");
	cvCvtColor(img1, img2, CV_RGB2GRAY);
	cvCanny(img2, img3, 100, 200);
	cvShowImage("Img1", img1);
	cvShowImage("Img2", img2);
	cvShowImage("Img3", img3);
	cvWaitKey(0);
	cvReleaseImage(&img1);
	cvReleaseImage(&img2);
	cvReleaseImage(&img3);
	cvDestroyAllWindows();

	/*
	IplImage* Img1;
	IplImage* Img2;
	IplImage* Img3;

	cvNamedWindow("Img");
	while (cvGetWindowHandle("Img"))
	{
		if(cvWaitKey(10)=='q') break;

		/// Uncomment when using <code>GetColorImage</code> instead of <code>GetColorImage2</code>
	    //ColorImage = cvCreateImage(cvSize(1388,1038),IPL_DEPTH_8U,3);
		if (colorCamera->GetColorImage2(&Img1) == libCameraSensors::RET_FAILED)
		//if (colorCamera->GetColorImage(ColorImage, true) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		if (colorCamera->GetColorImage2(&Img2) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		Img3 = cvCreateImage(cvGetSize(Img1),Img1->depth,Img1->nChannels);
		cvSub(Img1, Img2, Img3);
		cvShowImage("Img", Img3);

		cvReleaseImage(&Img1);
		cvReleaseImage(&Img2);
		cvReleaseImage(&Img3);
	}*/

	return ipa_utils::RET_OK;
}
  void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
  {
    CV_FUNCNAME("cvRenderTracks");
    __CV_BEGIN__;

    CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));

    if ((mode&CV_TRACK_RENDER_ID)&&(!font))
    {
      if (!defaultFont)
      {
	font = defaultFont = new CvFont;
	cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
      }
      else
	font = defaultFont;
    }

    if (mode)
    {
      for (CvTracks::const_iterator it=tracks.begin(); it!=tracks.end(); ++it)
      {
	if (mode&CV_TRACK_RENDER_ID)
	  if (!it->second->inactive)
	  {
	    stringstream buffer;
	    buffer << it->first;
	    cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(0.,255.,0.));
	  }

	if (mode&CV_TRACK_RENDER_BOUNDING_BOX)
	  if (it->second->inactive)
	    cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 50.));
	  else
	    cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 255.));

	if (mode&CV_TRACK_RENDER_TO_LOG)
	{
	  clog << "Track " << it->second->id << endl;
	  if (it->second->inactive)
	    clog << " - Inactive for " << it->second->inactive << " frames" << endl;
	  else
	    clog << " - Associated with blob " << it->second->label << endl;
	  clog << " - Lifetime " << it->second->lifetime << endl;
	  clog << " - Active " << it->second->active << endl;
	  clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
	  clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
	  clog << endl;
	}

	if (mode&CV_TRACK_RENDER_TO_STD)
	{
	  cout << "Track " << it->second->id << endl;
	  if (it->second->inactive)
	    cout << " - Inactive for " << it->second->inactive << " frames" << endl;
	  else
	    cout << " - Associated with blobs " << it->second->label << endl;
	  cout << " - Lifetime " << it->second->lifetime << endl;
	  cout << " - Active " << it->second->active << endl;
	  cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
	  cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
	  cout << endl;
	}
      }
    }

    __CV_END__;
  }
vector<VisionRecognitionResult> IPEL_Haar2FaceEyeDetectionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:IPEL_Haar2FaceEyeDetectionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	if( _storage ) cvClearMemStorage( _storage );

	if( _cascade_f ) {
		/* detect faces */
		CvSeq *faces = cvHaarDetectObjects(cvImage, _cascade_f, _storage,
			1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 30, 30 ) );

		if( faces && faces->total>0) {
			/* Get region of face */
			int nfaces = faces->total; // faces->total can change during the later detector calls
			_recognitionResult.resize (nfaces);
			CvRect *fr = new CvRect[nfaces];

			for( int i = 0; i < (faces ? nfaces : 0); i++ ) {
				/* draw a rectangle */
				CvRect *r = (CvRect*)cvGetSeqElem(faces, i);
				memcpy(&fr[i],r,sizeof(CvRect));					

				//rec.type = 1;
				_recognitionResult[i].name = "Face";
				/*- Get Upper left rectangle corner coordinate -*/
				_recognitionResult[i].point1X = (int)((r->x) + 0.5);
				_recognitionResult[i].point1Y = (int)((r->y) + 0.5);
				/*- Get Upper right rectangle corner coordinate -*/
				_recognitionResult[i].point2X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point2Y = (int)((r->y) + 0.5);
				/*- Get Lower right rectangle corner coordinate -*/
				_recognitionResult[i].point3X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point3Y = (int)((r->y + r->height) + 0.5);
				/*- Get Lower left rectangle corner coordinate -*/
				_recognitionResult[i].point4X = (int)((r->x) + 0.5);
				_recognitionResult[i].point4Y = (int)((r->y + r->height) + 0.5);
			}

			// Running the Haar detector a second time can produce different results.
			for( int i = 0; i < (faces ? nfaces : 0); i++ ) {
				/* reset buffer for the next object detection */
				cvClearMemStorage(_storage);

				/* Set the Region of Interest: estimate the eyes' position */
				cvSetImageROI(cvImage, cvRect(fr[i].x, fr[i].y + (int)(fr[i].height/5.5), fr[i].width, (int)(fr[i].height/3.0) ) );

				/* detect eyes */
				CvSeq* eyes = cvHaarDetectObjects(cvImage, _cascade_e, _storage,
					1.15, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(25, 15));

				/* draw a rectangle for each eye found */
				for(int j = 0; j < (eyes ? eyes->total : 0); j++ ) {
					if(j>1) break;
					CvRect *er = (CvRect*) cvGetSeqElem( eyes, j );
					cvRectangle(cvImage,
						cvPoint(er->x, er->y), 
						cvPoint(er->x + er->width, er->y + er->height),
						CV_RGB(255, 0, 0), 1, 8, 0);

				}

				cvResetImageROI(cvImage);
			}

			delete [] fr;
		}
	}

#if 0
	if( _recognitionResult.size() ) {
		for( std::vector<VisionRecognitionResult>::iterator it = _recognitionResult.begin()  ;  it != _recognitionResult.end()  ;  it++ ) {
			cvLine(cvImage,
				cvPoint(it->point1X,it->point1Y),
				cvPoint(it->point2X,it->point2Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point2X,it->point2Y),
				cvPoint(it->point3X,it->point3Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point3X,it->point3Y),
				cvPoint(it->point4X,it->point4Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point4X,it->point4Y),
				cvPoint(it->point1X,it->point1Y),
				CV_RGB(0, 255, 0));
		}
	}
#endif

	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
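
The function above runs the eye cascade only inside an ROI derived from each face rect, which both speeds up detection and suppresses false positives elsewhere in the frame. A minimal sketch of that face-then-eyes pattern (the cascades and storage are assumed to be loaded by the caller; the ROI fractions mirror the ones used above):

#include <opencv/cv.h>
#include <opencv/highgui.h>

void detect_eyes_in_faces(IplImage* img,
                          CvHaarClassifierCascade* face_cascade,
                          CvHaarClassifierCascade* eye_cascade,
                          CvMemStorage* storage)
{
	CvSeq* faces = cvHaarDetectObjects(img, face_cascade, storage,
	                                   1.1, 3, CV_HAAR_DO_CANNY_PRUNING,
	                                   cvSize(30, 30));
	for (int i = 0; i < (faces ? faces->total : 0); i++) {
		CvRect r = *(CvRect*) cvGetSeqElem(faces, i);

		// Search only the band of the face where the eyes sit.
		cvSetImageROI(img, cvRect(r.x, r.y + r.height / 5,
		                          r.width, r.height / 3));
		CvSeq* eyes = cvHaarDetectObjects(img, eye_cascade, storage,
		                                  1.15, 3, CV_HAAR_DO_CANNY_PRUNING,
		                                  cvSize(25, 15));
		// Drawing while the ROI is set uses ROI-relative coordinates,
		// so the eye rects land in the right place.
		for (int j = 0; j < (eyes ? eyes->total : 0); j++) {
			CvRect* e = (CvRect*) cvGetSeqElem(eyes, j);
			cvRectangle(img, cvPoint(e->x, e->y),
			            cvPoint(e->x + e->width, e->y + e->height),
			            CV_RGB(255, 0, 0), 1, 8, 0);
		}
		cvResetImageROI(img);
	}
}

Unlike the component above, this sketch does not clear the storage between the two detector runs; the original copies the face rects into its own array first precisely so that it can safely call cvClearMemStorage() before each eye pass.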
Example #29
// Primary function
double Camera::getCameraHeading(bool &coneExists)
{
	coneExists = 0;
	// Take a picture
	frame = cvQueryFrame(capture);


	//time_t timeval;


	// Set up
	data = (uchar *)frame->imageData;
	datar = (uchar *)result->imageData;

	// Save the initial picture
	//cvSaveImage("picture.jpeg",frame);
	// r 255
	// g 117
	// b 0

	int idealRed = 255;
	int idealGreen = 117;
	int idealBlue = 10;

	//int redRange = 150;
	//int greenRange = 20;
	//int blueRange = 60;	// need 100 for sun directly behind cone


	//  pixel must have a r value > idealRed - redRange
	//                  a g value < idealGreen + greenRange
	//		    a b value < idealBlue + blueRange


	// Iterate through every pixel looking for rgb values within each range
	for(int i = 0; i < (frame->height); i++) {
		for(int j = 0; j < (frame->width); j++) {
			if((data[i*frame->widthStep+j*frame->nChannels+2] > (idealRed-redRange)) && 		// red value > 255-125
			   (data[i*frame->widthStep+j*frame->nChannels+1] < (idealGreen+greenRange)) && 	// green value < 117+40
			   (data[i*frame->widthStep+j*frame->nChannels]   < (idealBlue+blueRange))) 		// blue value < 0 + 100
				datar[i*result->widthStep+j*result->nChannels] = 255;
			else
				datar[i*result->widthStep+j*result->nChannels] = 0;
		}
	}



	//std::cout << "Color change complete.\n";

	/* Apply erosion and dilation to eliminate some noise and even out blob */
	if(erosion >= 0) {
		cvErode(result,result,0,erosion);
	}
	if(dilation >= 0) {
		cvDilate(result,result,0,dilation);
	}

	//std::cout << "Erosion and dilation complete.\n";

	/* FindContours should not alter result (it's const in the function declaration), but it does...
	This function looks for contours (edges of polygons) on the already monochrome image */
	cvFindContours(result,storage,&contours);

	/* Draw the contours on contourimage */
	if(contours) {
		cvDrawContours(contourimage,contours,cvScalarAll(255),cvScalarAll(255),100);
	}

	//std::cout << "Contour drawing complete.\n";

	//time(&timeval);
	//std::string filename("boxes.jpeg");
	//filename = filename + ctime(&timeval);
	//cvSaveImage(filename.c_str(),contourimage);
//	cvSaveImage("boxes.jpeg",contourimage);




	//std::cout << "Countour image saved.\n";

	/* Calculate the bounding rectangle */
	bound = cvBoundingRect(contourimage,0);

	//std::cout << "Bounding rectangle computed.\n";

	/* Reset the contourimage image (otherwise contourimage turns into an Etch A Sketch) */
	if(contours) {
		//delete contours;
		//contours = new CvSeq;
		//cvZero(contours);
		cvClearSeq(contours);
	}

	cvZero(contourimage);

	//std::cout << "Countour image zeroed.\n";

	/* Calculate the bounding rectangle's top-left and bottom-right vertex */
	p1.x = bound.x;
	p2.x = bound.x + bound.width;
	p1.y = bound.y;
	p2.y = bound.y + bound.height;

	//std::cout << "Bound calculations complete.\n";





	/* Check if there is a rectangle in frame */
	if (p1.x == 0 && p1.y == 0) {


		//cvSaveImage("picture.jpeg",frame);
		cvReleaseCapture(&capture);
//		adjustment = std::numeric_limits<double>::quiet_NaN();
		adjustment = 0;
		coneExists = 0;
		return adjustment;

	} else {


		// Draw the bounding rectangle on the original image
		cvRectangle(frame,p1,p2,CV_RGB(255,0,0),3,8,0);

		// Calculate where the center of the rectangle would be
		// Add half of the bounding rectangle's width to the top-left point's x-coordinate
		p1.x = bound.x + (bound.width/2);

		// Add half of the difference between top and bottom edge to the bottom edge
		p1.y = p2.y + ((p1.y - p2.y)/2);

		// Draw a small circle at the center of the bounding rectangle
		cvCircle(frame,p1,3,CV_RGB(0,0,255),1,8,0);



		/* Convert the center's x-coordinate into a heading: map
		   [0, frame width] linearly onto [-halfField, +halfField] degrees */
		double fieldDegrees = 43.3;
		double halfField = fieldDegrees/2;
		adjustment = (double)p1.x;
		adjustment = adjustment/frame->width;
		adjustment = adjustment*fieldDegrees;
		adjustment = adjustment-halfField;
		if(adjustment == 0.0)	// normalize a negative zero
				adjustment = 0;

		cvZero(result);

	//	cvSaveImage("picture.jpeg",frame);


		cvReleaseCapture(&capture);

		coneExists = true;


		return adjustment;
	}
}
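
/* The heading math above maps the blob center's x-coordinate linearly onto
 * the camera's horizontal field of view. A minimal standalone sketch of that
 * conversion (the 43.3-degree field of view comes from the code above; the
 * function name is illustrative): */
static double pixelToHeading(double centerX, double imageWidth, double fovDegrees)
{
	/* Map [0, imageWidth] onto [-fovDegrees/2, +fovDegrees/2];
	   0 means the target is dead ahead, negative is left of center. */
	return (centerX / imageWidth) * fovDegrees - fovDegrees / 2.0;
}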
Example #30
0
/****************************************************************************
 * Filter: Check for faces and raises an event when one is found.
 ****************************************************************************
 * p_pic: A picture_t with its p_data_orig member set to an array of
 * IplImages (one image for each picture_t plane).
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    IplImage** p_img = NULL;
    int i_planes = 0;
    CvPoint pt1, pt2;
    int i, scale = 1;
 
    if( !p_pic )
    {
        msg_Err( p_filter, "no picture" );
        return NULL;
    }
    if( !p_pic->p_data_orig )
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    //(hack) cast the picture_t to array of IplImage*
    p_img = (IplImage**) p_pic->p_data_orig;
    i_planes = p_pic->i_planes;

    //check the image array for validity
    if( !p_img[0] )    //1st plane is 'I' i.e. greyscale
    {
        msg_Err( p_filter, "no image" );
        return NULL;
    }
    if( p_pic->format.i_chroma != VLC_CODEC_I420 )
    {
        msg_Err( p_filter, "wrong chroma - use I420" );
        return NULL;
    }
    if (i_planes<1)
    {
        msg_Err( p_filter, "no image planes" );
        return NULL;
    }

    //perform face detection
    cvClearMemStorage(p_filter->p_sys->p_storage);
    CvSeq* faces = NULL;
    if( p_filter->p_sys->p_cascade )
    {
        //we should make some of these params config variables
        faces = cvHaarDetectObjects( p_img[0], p_filter->p_sys->p_cascade,
            p_filter->p_sys->p_storage, 1.15, 5, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(20, 20) );
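        /* cvHaarDetectObjects parameters: 1.15 is the factor by which the
           search window is scaled between passes, 5 is the minimum number of
           neighboring detections needed to retain a face, Canny pruning skips
           regions with too few edges, and cvSize(20,20) is the smallest face
           searched for. */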
        //create the video_filter_region_info_t struct
        CvRect* r;
        if (faces && (faces->total > 0))
        {
            //msg_Dbg( p_filter, "Found %d face(s)", faces->total );
            free( p_filter->p_sys->event_info.p_region );
            p_filter->p_sys->event_info.p_region = NULL;
            if( NULL == ( p_filter->p_sys->event_info.p_region =
                  (video_filter_region_info_t *)malloc(faces->total*sizeof(video_filter_region_info_t))))
            {
                return NULL;
            }
            memset(p_filter->p_sys->event_info.p_region, 0, faces->total*sizeof(video_filter_region_info_t));
            p_filter->p_sys->event_info.i_region_size = faces->total;
        }

        //populate the video_filter_region_info_t struct
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            r = (CvRect*)cvGetSeqElem( faces, i );
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;
            cvRectangle( p_img[0], pt1, pt2, CV_RGB(0,0,0), 3, 8, 0 );

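            /* This cast relies on video_filter_region_info_t beginning with
               the same four int fields (x, y, width, height) as CvRect. */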
            *(CvRect*)(&(p_filter->p_sys->event_info.p_region[i])) = *r;
            p_filter->p_sys->event_info.p_region[i].i_id = p_filter->p_sys->i_id++;
            p_filter->p_sys->event_info.p_region[i].p_description = "Face Detected";
        }

        if (faces && (faces->total > 0))    //raise the video filter event
            var_TriggerCallback( p_filter->p_libvlc, VIDEO_FILTER_EVENT_VARIABLE );
    }
    else
        msg_Err( p_filter, "No cascade - is opencv-haarcascade-file valid?" );

    return p_pic;
}
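
/* Both detection examples above assume a Haar cascade and a CvMemStorage that
 * were created during initialization. A minimal setup sketch (the cascade file
 * name and the function name are illustrative): */
#include <opencv/cv.h>
#include <stdio.h>

static CvMemStorage *storage;
static CvHaarClassifierCascade *cascade;

static int initDetector(void)
{
	storage = cvCreateMemStorage(0);	/* growable storage for detections */
	cascade = (CvHaarClassifierCascade *)
		cvLoad("haarcascade_frontalface_alt.xml", NULL, NULL, NULL);
	if (!storage || !cascade) {
		fprintf(stderr, "failed to create storage or load Haar cascade\n");
		return 0;
	}
	return 1;
}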