Example No. 1
int main(int argc, char **argv) {
	
	// Prepare log file and check argument count
	FILE* log_file = fopen("track_results.log","a");
	if(log_file == NULL) {
		printf("Could not open track_results.log for appending.\n");
		return 1;
	}
	if(argc != 4) {
		fprintf(log_file, "Incorrect number of arguments.\n");
		return 1;
	}

	int desired_fps = atoi(argv[3]);
	if(desired_fps > 60 || desired_fps < 1) {
		fprintf(log_file, "Invalid FPS: please select a value in the range [1-60].\n");
		return 1;
	}

	////////// GROUND TRUTH SETUP AND PROCESSING //////////

	// Open and extract bounding rect info from gt file
	char buffer[100];
	memset(buffer, 0, sizeof(buffer));
	int gt_rect[4];

	FILE* gt_file = fopen(argv[2], "r");
	if(gt_file == NULL) {
		fprintf(log_file, "Could not open ground truth file %s.\n", argv[2]);
		return 1;
	}
	fgets(buffer, 100, gt_file);

	char* token = strtok(buffer, ",");
	gt_rect[0] = atoi(token);

	int i = 1;
	while(i < 4) {
		token = strtok(NULL, ",");
		gt_rect[i] = atoi(token);
		i++;
	}

	fclose(gt_file);

	// Load image and shrink it to a manageable working size
	IplImage* gt = cvLoadImage(argv[1]);
	if(gt == NULL) {
		fprintf(log_file, "Could not load ground truth image %s.\n", argv[1]);
		return 1;
	}

	IplImage* gt_resized = cvCreateImage(cvSize(320, 240), gt->depth, gt->nChannels);
	cvResize(gt, gt_resized, CV_INTER_NN);

	// Show bounding rect
	CvPoint corner1 = cvPoint(gt_rect[0], gt_rect[1]);
	CvPoint corner2 = cvPoint(gt_rect[0] + gt_rect[2], gt_rect[1] + gt_rect[3]);
	CvScalar rect_color = CV_RGB(255,0,0);

	cvRectangle(gt_resized, corner1, corner2, rect_color, 2);

	cvNamedWindow( "Ground Truth Reference", CV_WINDOW_AUTOSIZE );
	cvShowImage( "Ground Truth Reference", gt_resized );

	// Set ROI for ground truth
	CvRect quarter = cvRect(gt_rect[0], gt_rect[1], gt_rect[2], gt_rect[3]);
	cvSetImageROI(gt_resized, quarter);

	////////// PREPARE GOPRO FOR VIDEO CAPTURE //////////

	// Basic connectivity tests
	HINTERNET hSession = WinHttpOpen( L"GoPro HTTP Transfer/1.1", 
                              WINHTTP_ACCESS_TYPE_DEFAULT_PROXY,
                              WINHTTP_NO_PROXY_NAME, 
                              WINHTTP_NO_PROXY_BYPASS, 0 ); 

	if(hSession == NULL) {
		printf("Error %u in WinHttpOpen.\n", GetLastError());
		std::cin.get();
		return 1;
	}

	if( !WinHttpSetTimeouts( hSession, 10000, 10000, 10000, 10000 )) {
		printf( "Error %u in WinHttpSetTimeouts.\n", GetLastError());
		std::cin.get();
		return 1;
	}

	HINTERNET hConnect = WinHttpConnect( hSession, L"10.5.5.9", 80, 0);

	if(hConnect == NULL) {
		printf("Error %u in WinHttpConnect.\n", GetLastError());
		std::cin.get();
		return 1;
	}
	
	// Power on
	bool error = ping_request(hConnect, L"/bacpac/PW?t=goprohero&p=%01");
	if(error) {
		return 1;
	}

	Sleep(5000); //give time to boot up

	//Clear memory

	error = ping_request(hConnect, L"/camera/DA?t=goprohero");
	if(error) {
		return 1;
	}

	Sleep(5000); //give time to delete files

	// Set to video mode
	error = ping_request(hConnect, L"/camera/CM?t=goprohero&p=%00");
	if(error) {
		return 1;
	}

	Sleep(1000);

	// Set video resolution to 720p, 30FPS
	error = ping_request(hConnect, L"/camera/VR?t=goprohero&p=%00");
	if(error) {
		return 1;
	}

	Sleep(1000);

	WinHttpCloseHandle(hConnect);
	WinHttpCloseHandle(hSession);

	////////// PREPARE TIMING & VIDEO RESOURCES //////////

	// Prepare timing instrumentation (for FPS control)
	__int64 last_time = 0;
	__int64 current_time = 0;
	__int64 freq = 0;
	int frame_time = 1000 / desired_fps;

	// Play video
	cvNamedWindow( "MOV Window", CV_WINDOW_AUTOSIZE );
	CvCapture* track_video = cvCreateFileCapture( "tags.mov" );
	if(track_video == NULL) {
		fprintf(log_file, "Could not open tags.mov.\n");
		return 1;
	}
	IplImage* current_frame;

	// Record annotated video
	CvSize write_size = cvSize(
       (int)cvGetCaptureProperty( track_video, CV_CAP_PROP_FRAME_WIDTH),
       (int)cvGetCaptureProperty( track_video, CV_CAP_PROP_FRAME_HEIGHT)
    );	
	CvVideoWriter *writer = cvCreateVideoWriter( "output.avi", CV_FOURCC('M','J','P','G'), 20, write_size, 1);

	// Start timer
	QueryPerformanceCounter((LARGE_INTEGER*) &last_time);

	////////// MAIN PROCESSING LOOP //////////

	bool to_search = true;
	bool next = true;
	CvRect est = quarter;

	while(1) {

		// Read in current frame
		current_frame = cvQueryFrame(track_video);
		if(current_frame == NULL) {
			break;
		}

		if(to_search == false) {
			est = process_frame(gt_resized, current_frame, quarter, &next, log_file);
			rect_color = CV_RGB(0,255,0);
		} else {
			est = search(gt_resized, current_frame, quarter, &next, log_file);
			rect_color = CV_RGB(255,0,0);
		}

		fprintf(log_file, "Coordinates: %d , %d\t\t", est.x, est.y);
		if(to_search) {
			fprintf(log_file, "Recommended Action: Search\n");
		} else {
			// X direction flight planning
			if(est.x < ((current_frame->width / 2) - 10)) {
				fprintf(log_file, "Recommended Action: Move Right , ");
			}
			else if(est.x > ((current_frame->width / 2) + 10)) {
				fprintf(log_file, "Recommended Action: Move Left, ");
			}
			else {
				fprintf(log_file, "Recommended Action: Hover, ");
			}
			
			// Y direction flight planning
			if(est.y < ((current_frame->height / 2) - 10)) {
				fprintf(log_file, "Move Backwards\n");
			}
			else if(est.y > ((current_frame->height / 2) + 10)) {
				fprintf(log_file, "Move Forwards\n");
			}
			else {
				fprintf(log_file, "Hover\n");
			}
		}
		to_search = next;

		// Swap frames
		quarter = est;

		CvPoint corner1 = cvPoint(est.x, est.y);
		CvPoint corner2 = cvPoint(est.x + est.width, est.y + est.height);

		cvRectangle(current_frame, corner1, corner2, rect_color, 2);

		// Display frame
		cvShowImage( "MOV Window", current_frame );
		cvWriteFrame( writer, current_frame );

		// FPS Control
		QueryPerformanceCounter((LARGE_INTEGER*) &current_time);
		QueryPerformanceFrequency((LARGE_INTEGER*) &freq);

		// Convert elapsed ticks to milliseconds before comparing to the per-frame budget
		int elapsed_ms = (int)(((current_time - last_time) * 1000) / freq);
		int wait_time = frame_time - elapsed_ms;
		last_time = current_time;

		if(wait_time <= 0) {
			continue;
		}

		char ext_key = cvWaitKey(wait_time);
		if(ext_key == 27) {
			break;
		}

	}

	////////// CLEAN-UP //////////

	cvReleaseCapture( &track_video );
	cvReleaseVideoWriter( &writer );
	cvDestroyWindow( "MOV Window" );

	cvReleaseImage( &gt );
	cvReleaseImage( &gt_resized );

	cvDestroyWindow( "Ground Truth Reference" );

	fclose(log_file);

	return 0;

}
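The ping_request helper used above is not part of this listing. A minimal sketch of what it might look like, assuming it issues a blocking HTTP GET over the already-open WinHTTP connection and returns true on failure (the signature is inferred from the call sites; the real helper may differ):

// Hedged sketch of the ping_request helper, inferred from the call sites above.
bool ping_request(HINTERNET hConnect, LPCWSTR path) {
	HINTERNET hRequest = WinHttpOpenRequest(hConnect, L"GET", path,
	                                        NULL, WINHTTP_NO_REFERER,
	                                        WINHTTP_DEFAULT_ACCEPT_TYPES, 0);
	if(hRequest == NULL) {
		printf("Error %u in WinHttpOpenRequest.\n", GetLastError());
		return true;
	}

	// Send the request and wait for the camera's response.
	BOOL ok = WinHttpSendRequest(hRequest, WINHTTP_NO_ADDITIONAL_HEADERS, 0,
	                             WINHTTP_NO_REQUEST_DATA, 0, 0, 0)
	          && WinHttpReceiveResponse(hRequest, NULL);
	if(!ok) {
		printf("Error %u sending GoPro request.\n", GetLastError());
	}

	WinHttpCloseHandle(hRequest);
	return !ok; // true means error, matching the callers above
}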
Example No. 2
int main_plate(const char * car_name)
{
	
	/********************************* Preparation *********************************/
	IplImage * img_car = NULL;
	IplImage * img_car_after_resize = NULL;
	IplImage * img_after_preprocess = NULL;
	IplImage * img_plate = NULL;
	IplImage * img_after_resize = NULL;
	IplImage * img_character = NULL;

	List rects; /* list of candidate plate-location rectangles */
	double scale = -1; /* used when normalizing the image size */
	int width = 0, height = 0; /* target width/height of the initial size normalization */
	int number = -1;	/* digit recognized for the last character */
	int count_recog = 0;
	char filename[50];

#if 1
	//cvNamedWindow("img_car", 1);
//	cvNamedWindow("img_car_after_resize", 1);
	//cvNamedWindow("img_after_preprocess", 1);
	//cvNamedWindow("img_plate", 1);
#endif

	if ((img_car = cvLoadImage(car_name, -1)) == NULL) {
		fprintf(stderr, "Can not open car image file in main.c!\n");
		exit(-1);
	}

	/***************************** Start image processing *****************************/
	/* In the captured image the car takes up too small a fraction of the frame, so we re-crop
	 the image so that the car fills most of it. We observed that the car is almost always near
	 the center of the photo, so we take the central region of the image as the new picture.
	 Strategy:
	 1. Split the image into thirds by width and keep the middle third; the plate must be in it.
	 2. Cut off the top quarter and the bottom quarter; the plate must be in the remaining half.
	 */
	/********* Now crop the car body *********/
#if 0
	IplImage * tmp_img = cvCreateImage(cvSize(1.0 / 3 * img_car->width, 1.0 / 2 * img_car->height), img_car->depth, img_car->nChannels);
	cvSetImageROI(img_car, cvRect(1.0 / 3 * img_car->width, 1.0 / 4 * img_car->height, 1.0 / 3 * img_car->width, 1.0 / 2 * img_car->height));
	cvCopy(img_car, tmp_img);
	cvSaveImage("tmp_img.bmp", tmp_img);
	cvResetImageROI(img_car);
	img_car = cvLoadImage("tmp_img.bmp", -1);					/*img_car is now the newly cropped image*/
	assert(img_car != NULL);

	cvNamedWindow("haha", 1);
	cvShowImage("haha", tmp_img);
	cvWaitKey(0);
#endif

	cut_image(img_car);
	img_car = cvLoadImage("image/tmp_img.bmp", -1);					/*img_car is now the newly cropped image*/
	assert(img_car != NULL);

	/********************************************************************************************************/
	/* To process images uniformly, first normalize the image size; a size of roughly 640*480
	   works well, so we simply resize by the corresponding scale factor.
	 */



	/* cvResize does the work */
#if 1
	scale = 1.0 * 640 / img_car->width;			/* normalize the width to 640; the height scales proportionally */
	width = scale * img_car->width;
	height = scale * img_car->height;
	img_car_after_resize = cvCreateImage(cvSize(width, height), img_car->depth, img_car->nChannels);
	cvResize(img_car, img_car_after_resize);			/* size normalization: yields an image 640 pixels wide */
	cvSaveImage("image/img_car_after_resize.bmp", img_car_after_resize);
#endif


	/* Image preprocessing: input is the size-normalized car image, output is img_after_preprocess.bmp */
	preprocess_car_img(img_car_after_resize);

	/* load img_after_preprocess.bmp */
	if ((img_after_preprocess = cvLoadImage("image/img_after_preprocess.bmp", -1)) == NULL) {
		fprintf(stderr, "Can not open file img_after_preprocess.bmp in main.c");
		exit(-1);
	}

#if 1
	/* display the preprocessed image */
	//cvShowImage("img_car", img_after_preprocess);
	//cvShowImage("img_after_preprocess", img_after_preprocess);
#endif
	
	/************************ Preprocessing done; start locating the plate ************************/
	rects = get_location(img_after_preprocess, img_car_after_resize);			/* get the plate location; the early design allowed several candidate locations, but only one turned out to be needed, so rects holds a single position (still stored in a linked list) */
	/* get_location returns the head node's next node, so there is no need to pass rects->next here */
	assert(count_node(rects) == 1);						/* assert that the list holds exactly one candidate plate location */
	/************************ Plate located; start cropping the plate ************************/
	get_plate_image(img_car_after_resize, rects);		/* extract the plate image */

	img_plate = cvLoadImage("image/plate_img0.bmp", -1);		/* the plate_img bitmap produced by the function above */
	if (img_plate == NULL) {
		fprintf(stderr, "Can not open plate image file!\n");
		exit(-1);
	}

	/************************ Resize the plate ************************/
	scale = plate_resize_scale(img_plate);
	resize_image(img_plate, img_after_resize, scale);		/* e.g. a last argument of 5 stretches the plate to five times its original length */
	if ((img_after_resize = cvLoadImage("image/plate_img_after_resize.bmp", -1)) == NULL) {
		fprintf(stderr, "Can not open file plate_img_after_resize.bmp in main.c");
		exit(-1);
	}

	/************************ Preprocess the plate ************************/
	preprocess_plate_image(img_after_resize);			/* preprocess the plate image */
	
	/************************ Extract the character information from the plate ************************/
	get_character(img_after_resize);					/* produce an image for each character */
	//cvShowImage("image_car", img_after_resize);
	//printf("the plate is: \n");
	count_recog = 0;

	FILE *fp = fopen("result.txt", "wb");
	char buf[1] = {0};
	int ct = 0;
	while (ct++ < 10000) {
		fwrite(buf, 1, sizeof(char), fp);
	}
	fclose(fp);
	while (count_recog < 7) {

		sprintf(filename, "image/character%d.png", count_recog);

		img_character = cvLoadImage(filename, -1);

		if (img_character == NULL) {
			break;
		}

	/************************ Start character recognition ************************/

		number = character_recognizing(img_character);
		count_recog++;
	}
	cvWaitKey(0);
	printf("Time used = %.2f\n", (double)clock() / CLOCKS_PER_SEC);
	return 0;
}
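cut_image is defined elsewhere in this project. A minimal sketch consistent with the disabled #if 0 block above, assuming it crops the middle third horizontally and the middle half vertically and writes the result to image/tmp_img.bmp (the path is inferred from the load that follows the call):

/* Hedged reconstruction of cut_image, based on the disabled block above. */
void cut_image(IplImage * img_car)
{
	/* keep the middle third by width and the middle half by height */
	CvRect center = cvRect(img_car->width / 3, img_car->height / 4,
	                       img_car->width / 3, img_car->height / 2);
	IplImage * tmp_img = cvCreateImage(cvSize(center.width, center.height),
	                                   img_car->depth, img_car->nChannels);
	cvSetImageROI(img_car, center);
	cvCopy(img_car, tmp_img, NULL);
	cvResetImageROI(img_car);
	cvSaveImage("image/tmp_img.bmp", tmp_img);
	cvReleaseImage(&tmp_img);
}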
Example No. 3
int
main (int argc, char **argv)
{
	int i;
	int gui = true;
	IplImage *src_img = 0, *src_gray = 0;
	const char *cascade_name = "haarcascade_frontalface_default.xml";
	CvHaarClassifierCascade *cascade = 0;
	CvMemStorage *storage = 0;
	CvSeq *faces;
	static CvScalar colors[] = {
		{{0, 0, 255}}, {{0, 128, 255}},
		{{0, 255, 255}}, {{0, 255, 0}},
		{{255, 128, 0}}, {{255, 255, 0}},
		{{255, 0, 0}}, {{255, 0, 255}}
	};

	// (1) Load the image
	if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
		return -1;
	if (argc == 3 && strcmp("--no-gui", argv[2]) == 0 )
		gui = false;
	
	src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);

	// (2) Load the boosted classifier cascade
	cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
	if (cascade == NULL) {
		fprintf (stderr, "Could not load cascade file %s\n", cascade_name);
		return -1;
	}

	// (3) Allocate memory, convert the loaded image to grayscale, and equalize the histogram
	storage = cvCreateMemStorage (0);
	cvClearMemStorage (storage);
	cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
	cvEqualizeHist (src_gray, src_gray);

	// (4) Detect objects (faces)
	faces = cvHaarDetectObjects (src_gray, cascade, storage, 1.11, 4, 0, cvSize (40, 40));


	// puts("<faces>");
	printf("[{\"faces\":");
	// (5)検出された全ての顔位置に,円を描画する
	for (i = 0; i < (faces ? faces->total : 0); i++) {
		// puts("  <face>");
		
		CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
		CvPoint center;
		int radius;
 		// printf("    <top>%d</top>\n", r->y);
 		// printf("    <right>%d</right>\n", r->x + r->width);
 		// printf("    <bottom>%d</bottom>\n", r->y + r->height);
 		// printf("    <left>%d</left>\n", r->x);
		puts("[{");
		printf("	\"id\":%d,\n", 0);
		printf("	\"x\":%d,\n", r->x);
		printf("	\"y\":%d,\n", r->y);
		printf("	\"w\":%d,\n", r->width);
		printf("	\"h\":%d\n", r->height);
		puts("}]");
		if (i != faces->total - 1) {
			puts(",");
		}
		center.x = cvRound (r->x + r->width * 0.5);
		center.y = cvRound (r->y + r->height * 0.5);
		radius = cvRound ((r->width + r->height) * 0.25);
		cvCircle (src_img, center, radius, colors[i % 8], 3, 8, 0);
		// puts("  </face>");
	}
	// puts("</faces>");
	puts("}]");





	// (6)画像を表示,キーが押されたときに終了
	if (gui) {
		cvNamedWindow ("Face Detection", CV_WINDOW_AUTOSIZE);
		cvShowImage ("Face Detection", src_img);
		cvWaitKey (0);
	}

	cvDestroyWindow ("Face Detection");
	cvReleaseImage (&src_img);
	cvReleaseImage (&src_gray);
	cvReleaseMemStorage (&storage);

	return 0;
}
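Assuming the program is built as a binary named facedetect (the name is illustrative), a typical invocation that skips the GUI and captures the JSON output would be:

./facedetect group_photo.jpg --no-gui > faces.json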
Example No. 4
void detect_and_draw(IplImage * img, IplImage * depth, IplImage *faceDepthRet, bool save)
{
	int scale = 1;

	// Create working copies of the input images (assumes scale == 1, i.e. same dimensions)
	IplImage *temp =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 8, 3);
	memcpy(temp->imageData, img->imageData, img->imageSize);

	IplImage *depthTemp =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);
	memcpy(depthTemp->imageData, depth->imageData, depth->imageSize);

	IplImage *faceDepth =
		cvCreateImage(cvSize(img->width / scale, img->height / scale), 16, 1);

	// Create two points to represent the face locations
	CvPoint pt1, pt2;
	int i, j, k;

	// Clear the memory storage which was used before
	cvClearMemStorage(storage);

	// Find whether the cascade is loaded, to find the faces. If yes, then:
	if (cascade)
	{

		// There can be more than one face in an image. So create a growable
		// sequence of faces.
		// Detect the objects and store them in the sequence
		/* CvSeq* faces = cvHaarDetectObjects( temp, cascade, storage, 1.1, 2, 
		   CV_HAAR_DO_CANNY_PRUNING, cvSize(40, 40) ); */
		CvSeq *faces = cvHaarDetectObjects(temp, cascade, storage,
										   1.6, 2, CV_HAAR_DO_CANNY_PRUNING,
										   cvSize(40, 40));

		// Loop the number of faces found.
		for (i = 0; i < (faces ? faces->total : 0); i++)
		{
			// Create a new rectangle for drawing the face
			CvRect *r = (CvRect *) cvGetSeqElem(faces, i);

			// Find the dimensions of the face,and scale it if necessary
			pt1.x = r->x * scale;
			pt2.x = (r->x + r->width) * scale;
			pt1.y = r->y * scale;
			pt2.y = (r->y + r->height) * scale;

			// Draw the rectangle in the input image
			cvRectangle(temp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);
			cvRectangle(depthTemp, pt1, pt2, CV_RGB(0, 0, 255), 3, 8, 0);

			cvSetImageROI(depth,
						  cvRect(pt1.x, pt1.y, r->width * scale,
								 r->height * scale));

			IplImage *faceDepthTemp =
				cvCreateImage(cvGetSize(depth), depth->depth,
							  depth->nChannels);
			IplImage *faceDepthTemp2 =
				cvCreateImage(cvGetSize(depth), 8,
							  depth->nChannels);

			cvCopy(depth, faceDepthTemp, NULL);

			cvResetImageROI(depth);

			// Maximize standard deviation.
			//stretchFaceDepth(faceDepthTemp);

			cvResize(faceDepthTemp, faceDepth);
			cvConvertScale(faceDepthTemp, faceDepthTemp2, 1.0/256.0, 0);

			cvResize(faceDepthTemp2, faceDepthRet);

			cvReleaseImage(&faceDepthTemp);
			cvReleaseImage(&faceDepthTemp2);

			if (save)
			{
				FILE *csvFile = fopen("face.csv", "w");
				if (csvFile == NULL)
				{
					printf("Could not open face.csv for writing!\n");
				}
				else
				{
					for (j = pt1.y; j < pt2.y; j++)
					{
						for (k = pt1.x; k < pt2.x; k++)
						{
							fprintf(csvFile, "%u,",
									(((uint16_t *) (depth->imageData)) +
									 j * depth->width)[k]);
						}
						fprintf(csvFile, "\n");
					}
					printf("Face captured!\n");
					fclose(csvFile);
				}
			}
		}
	}

	// Show the image in the window named "result"
	cvShowImage("result", temp);
	cvShowImage("resultDepth", depthTemp);
	cvShowImage("faceDepth", faceDepth);

	// Release the temp image created.
	cvReleaseImage(&temp);
	cvReleaseImage(&depthTemp);
	cvReleaseImage(&faceDepth);
}
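detect_and_draw relies on the globals cascade and storage being initialized elsewhere. A minimal sketch of that setup, assuming the stock frontal-face cascade that ships with OpenCV (the file name and the init_detector function name are illustrative):

// Hypothetical globals assumed by detect_and_draw().
CvHaarClassifierCascade *cascade = NULL;
CvMemStorage *storage = NULL;

void init_detector(void)
{
	cascade = (CvHaarClassifierCascade *) cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
	storage = cvCreateMemStorage(0);
	if (cascade == NULL || storage == NULL)
	{
		fprintf(stderr, "Could not load the cascade or allocate storage.\n");
		exit(1);
	}
}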
Example No. 5
//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
{
 	IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskAVG = 0,*ImaskAVGCC = 0;
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

	int startcapture = 1;
	int endcapture = 30;
	int c,n;

	maxMod[0] = 3;  //Set color thresholds to default values
	minMod[0] = 10;
	maxMod[1] = 1;
	minMod[1] = 1;
	maxMod[2] = 1;
	minMod[2] = 1;
	float scalehigh = HIGH_SCALE_NUM;
	float scalelow = LOW_SCALE_NUM;
	
	if(argc < 3) {
		printf("ERROR: Too few parameters\n");
		help();
		return -1;
	}else{
		if(argc == 3){
			printf("Capture from Camera\n");
			capture = cvCaptureFromCAM( 0 );
		}
		else {
			printf("Capture from file %s\n",argv[3]);
	//		capture = cvCaptureFromFile( argv[3] );
			capture = cvCreateFileCapture( argv[3] );
			if(!capture) { printf("Couldn't open %s\n",argv[3]); return -1;}
		}
		if(isdigit(argv[1][0])) { //Start frame of background capture
			startcapture = atoi(argv[1]);
			printf("startcapture = %d\n",startcapture);
		}
		if(isdigit(argv[2][0])) { //End frame of background capture
			endcapture = atoi(argv[2]);
			printf("endcapture = %d\n", endcapture);
		}
		if(argc > 4){ //See if parameters are set from command line
			//FOR AVG MODEL
			if(argc >= 5){
				if(isdigit(argv[4][0])){
					scalehigh = (float)atoi(argv[4]);
				}
			}
			if(argc >= 6){
				if(isdigit(argv[5][0])){
					scalelow = (float)atoi(argv[5]);
				}
			}
			//FOR CODEBOOK MODEL, CHANNEL 0
			if(argc >= 7){
				if(isdigit(argv[6][0])){
					maxMod[0] = atoi(argv[6]);
				}
			}
			if(argc >= 8){
				if(isdigit(argv[7][0])){
					minMod[0] = atoi(argv[7]);
				}
			}
			//Channel 1
			if(argc >= 9){
				if(isdigit(argv[8][0])){
					maxMod[1] = atoi(argv[8]);
				}
			}
			if(argc >= 10){
				if(isdigit(argv[9][0])){
					minMod[1] = atoi(argv[9]);
				}
			}
			//Channel 2
			if(argc >= 11){
				if(isdigit(argv[10][0])){
					maxMod[2] = atoi(argv[10]);
				}
			}
			if(argc >= 12){
				if(isdigit(argv[11][0])){
					minMod[2] = atoi(argv[11]);
				}
			}
		}
	}

	//MAIN PROCESSING LOOP:
	bool pause = false;
	bool singlestep = false;

    if( capture )
    {
      cvNamedWindow( "Raw", 1 );
		cvNamedWindow( "AVG_ConnectComp",1);
		cvNamedWindow( "ForegroundCodeBook",1);
		cvNamedWindow( "CodeBook_ConnectComp",1);
 		cvNamedWindow( "ForegroundAVG",1);
        int i = -1;
        
        for(;;)
        {
    			if(!pause){
//        		if( !cvGrabFrame( capture ))
//                	break;
//            	rawImage = cvRetrieveFrame( capture );
				rawImage = cvQueryFrame( capture );
				++i;//count it
//				printf("%d\n",i);
				if(!rawImage) 
					break;
				//REMOVE THIS FOR GENERAL OPERATION, JUST A CONVENIENCE WHEN RUNNING WITH THE SMALL tree.avi file
				if(i == 56){
					pause = true;
					printf("\n\nVideo paused for your convenience at frame 56 to work with the demo\n"
					"You may adjust parameters, single step or continue running\n\n");
					help();
				}
			}
			if(singlestep){
				pause = true;
			}
			//First time:
			if(0 == i) {
				printf("\n . . . wait for it . . .\n"); //Just in case you wonder why the image is white at first
				//AVG METHOD ALLOCATION
				AllocateImages(rawImage);
				scaleHigh(scalehigh);
				scaleLow(scalelow);
				ImaskAVG = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskAVGCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskAVG,cvScalar(255));
				//CODEBOOK METHOD ALLOCATION:
				yuvImage = cvCloneImage(rawImage);
				ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskCodeBook,cvScalar(255));
				imageLen = rawImage->width*rawImage->height;
				cB = new codeBook [imageLen];
				for(int f = 0; f<imageLen; f++)
				{
 					cB[f].numEntries = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10; //Learning bounds factor
				}
				ch[0] = true; //Allow threshold setting simultaneously for all channels
				ch[1] = true;
				ch[2] = true;
			}
			//If we've got an rawImage and are good to go:                
        	if( rawImage )
        	{
				cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
				//This is where we build our background model
				if( !pause && i >= startcapture && i < endcapture  ){
					//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
					accumulateBackground(rawImage);
					//LEARNING THE CODEBOOK BACKGROUND
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
						pColor += 3;
					}
				}
				//When done, create the background model
				if(i == endcapture){
					createModelsfromStats();
				}
				//Find the foreground if any
				if(i >= endcapture) {
					//FIND FOREGROUND BY AVG METHOD:
					backgroundDiff(rawImage,ImaskAVG);
					cvCopy(ImaskAVG,ImaskAVGCC);
					cvconnectedComponents(ImaskAVGCC);
					//FIND FOREGROUND BY CODEBOOK METHOD
					uchar maskPixelCodeBook;
					pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
					uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
					for(int c=0; c<imageLen; c++)
					{
						 maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
						*pMask++ = maskPixelCodeBook;
						pColor += 3;
					}
					//This part just to visualize bounding boxes and centers if desired
					cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
					cvconnectedComponents(ImaskCodeBookCC);
				}
				//Display
           		cvShowImage( "Raw", rawImage );
				cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
   				cvShowImage( "ForegroundAVG",ImaskAVG);
 				cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
 				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				//USER INPUT:
	         	c = cvWaitKey(10)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
					//AVG BACKROUND PARAMS
					case '-':
						if(i > endcapture){
							scalehigh += 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '=':
						if(i > endcapture){
							scalehigh -= 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '[':
						if(i > endcapture){
							scalelow += 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
					case ']':
						if(i > endcapture){
							scalelow -= 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
				//CODEBOOK PARAMS
                case 'y':
                case '0':
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'u':
                case '1':
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'v':
                case '2':
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'a': //All
                case '3':
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
				case 'i': //modify max classification bounds (max bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] += 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'o': //modify max classification bounds (max bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] -= 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'k': //modify min classification bounds (min bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] += 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				case 'l': //modify min classification bounds (min bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] -= 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				}
				
            }
		}		
		cvReleaseCapture( &capture );
		cvDestroyWindow( "Raw" );
		cvDestroyWindow( "ForegroundAVG" );
		cvDestroyWindow( "AVG_ConnectComp" );
		cvDestroyWindow( "ForegroundCodeBook" );
		cvDestroyWindow( "CodeBook_ConnectComp" );
		DeallocateImages();
		if(yuvImage) cvReleaseImage(&yuvImage);
		if(ImaskAVG) cvReleaseImage(&ImaskAVG);
		if(ImaskAVGCC) cvReleaseImage(&ImaskAVGCC);
		if(ImaskCodeBook) cvReleaseImage(&ImaskCodeBook);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cB;
    }
	else{ printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
	}
    return 0;
}
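The averaging background model driven above (AllocateImages, accumulateBackground, createModelsfromStats, backgroundDiff) is defined elsewhere and not shown here. A minimal sketch of the core accumulation step, assuming 32F scratch and accumulator images (Iscratch, Iscratch2, IavgF, IdiffF, IprevF) and a frame counter Icount allocated by AllocateImages():

// Hedged sketch of the average/average-difference accumulation step.
void accumulateBackground(IplImage *I)
{
	static int first = 1;
	cvCvtScale(I, Iscratch, 1, 0);       // convert the raw 8U frame to 32F
	if(!first){
		cvAcc(Iscratch, IavgF);          // running sum for the mean image
		cvAbsDiff(Iscratch, IprevF, Iscratch2);
		cvAcc(Iscratch2, IdiffF);        // running sum of frame-to-frame |diff|
		Icount += 1.0;
	}
	first = 0;
	cvCopy(Iscratch, IprevF);            // remember this frame for the next diff
}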
Example No. 6
void zgomot_sare_si_piper( IplImage *img )
{
    int	i, j, k;
    int w, h;
    int nivel_gri;
    
    double	e_zgomot;	//noise energy
    double	e_imagine;	//image energy
    double	SNR;	//signal-to-noise ratio

    double	nr = 0.1 ;	//fraction of pixels affected by noise (10%)
    CvScalar   pixel;
    CvScalar   sare_piper;
    IplImage   *imagine_zgomot;

    w = img->width;
    h = img->height;

    imagine_zgomot = cvCreateImage( cvSize( w, h ), IPL_DEPTH_8U, 1 );
    e_imagine = 0;
    e_zgomot =  0;
    SNR = 0;
    
    //copy the original image and compute its energy
    for( i = 0; i < h; i++ )
    	for( j = 0; j < w; j++ )
        {
             pixel = cvGet2D( img, i, j );
             nivel_gri = (int)( pixel.val[ 0 ] );
             e_imagine += nivel_gri * nivel_gri;
             cvSet2D( imagine_zgomot, i, j, pixel );    
	    }

    srand( (unsigned) time( NULL ) );	/* seed the generator; assumes <time.h> is included */
    
    k = 0;
    while( k < ( int )( w * h * nr ) )
    {
           //randomly pick the coordinates of the pixel to corrupt
           i = ( int )( 1. * h * rand() / ( RAND_MAX + 1. ) );
           j = ( int )( 1. * w * rand() / ( RAND_MAX + 1. ) );

           if( (i >= 0) && (i < h) && (j >= 0) && (j < w) )
           {
               //decide whether the noise sample is "salt" or "pepper"
               if( ( 100. * rand() / ( RAND_MAX + 1. ) ) >= 50 )
               {
                   sare_piper.val[ 0 ] = 255;
                   
                   pixel = cvGet2D( img, i, j );
                   nivel_gri = (int)( pixel.val[ 0 ] );
                   e_zgomot += (255 - nivel_gri) * (255 - nivel_gri);
               }
               else
               {
                   sare_piper.val[ 0 ] = 0;
                   
                   pixel = cvGet2D( img, i, j );
                   nivel_gri = (int)( pixel.val[ 0 ] );
                   e_zgomot += nivel_gri * nivel_gri;
               }
               
               cvSet2D( imagine_zgomot, i, j, sare_piper );
               k++;
           }
    }

    SNR = 10 * log10( e_imagine / e_zgomot );	/* SNR in dB: 10*log10(signal energy / noise energy) */
    printf( "Salt-and-pepper impulse noise, SNR = %6.3lf dB\n", SNR );

    cvNamedWindow( "Imagine + zgomot sare si piper", 1 );
    cvShowImage( "Imagine + zgomot sare si piper", imagine_zgomot );
    cvSaveImage( "imagine_zgomot.png", imagine_zgomot );
    cvWaitKey(0);
    cvReleaseImage( &imagine_zgomot );
    cvDestroyWindow( "Imagine + zgomot sare si piper" );
}
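As a quick sanity check of the corrected formula: if the accumulated image energy came to 4.0e9 and the noise energy to 1.0e7, the function would report 10*log10(400) ≈ 26.02 dB (the numbers are purely illustrative).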
Example No. 7
	void Run()
	{
		int w, h;
		IplImage *pCapImage;
		PBYTE pCapBuffer = NULL;
		
        // Create camera instance
		_cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps);
		
        if(_cam == NULL)		return;
		
        // Get camera frame dimensions
		CLEyeCameraGetFrameDimensions(_cam, w, h);
		
        // Depending on color mode chosen, create the appropriate OpenCV image
		if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW)
			pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4);
		else
			pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);

		// Set some camera parameters
		//CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 30);
		//CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 500);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_WHITEBALANCE, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_RED, 100);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_BLUE, 200);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_GREEN, 200);

        

		// Start capturing
		CLEyeCameraStart(_cam);

		// Working images: a 3-channel BGR copy (alpha dropped), its HSV conversion,
		// and a single-channel mask of the in-range pixels
		IplImage* bgr_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3);
		IplImage* hsv_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3);
		IplImage* thresholded = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 1);
		IplImage* temp = cvCreateImage(cvSize(pCapImage->width >> 1, pCapImage->height >> 1), IPL_DEPTH_8U, 3);

        // Create a window in which the captured images will be presented
        cvNamedWindow( "Camera" , CV_WINDOW_AUTOSIZE );
        cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
        cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
 
        
 
        //int hl = 100, hu = 115, sl = 95, su = 135, vl = 115, vu = 200;
        int hl = 5, hu = 75, sl = 40, su = 245, vl = 105, vu = 175;
        

		// image capturing loop
		while(_running)
		{

            // Detect a red ball
            CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
            CvScalar hsv_max = cvScalar(hu, su, vu, 0);

			cvGetImageRawData(pCapImage, &pCapBuffer);
			CLEyeCameraGetFrame(_cam, pCapBuffer);

			// Get one frame
			if( !pCapImage )
			{
				fprintf( stderr, "ERROR: frame is null...\n" );
				getchar();
				break;
			}

			// Drop the alpha channel, then convert the color space to HSV,
			// as it is much easier to filter colors in the HSV color-space.
			cvConvertImage(pCapImage, bgr_frame);
			cvCvtColor(bgr_frame, hsv_frame, CV_BGR2HSV);
			// Filter out colors which are out of range.
			cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
 
                // Memory for hough circles
                CvMemStorage* storage = cvCreateMemStorage(0);
                // hough detector works better with some smoothing of the image
                cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
                CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                                thresholded->height/4, 100, 50, 10, 400);
 
                for (int i = 0; i < circles->total; i++)
                {
                    float* p = (float*)cvGetSeqElem( circles, i );
                    //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
                    cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                            3, CV_RGB(0,255,0), -1, 8, 0 );
                    cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                            cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
                }
 
                cvShowImage( "Camera", pCapImage ); // Original stream with detected ball overlay
                cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
                cvShowImage( "EdgeDetection", thresholded ); // The stream after color filtering
 
                cvReleaseMemStorage(&storage);
 
                // Do not release the frame!
 
                //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
                //remove higher bits using AND operator
                int key = cvWaitKey(10);
                
                

                switch(key){
                    case 'q' : hu += 5; break;
                    case 'Q' : hu -= 5; break;
                    
                    case 'a' : hl -= 5; break;
                    case 'A' : hl += 5; break;
                    
                    case 'w' : su += 5; break;
                    case 'W' : su -= 5; break;
                    
                    case 's' : sl -= 5; break;
                    case 'S' : sl += 5; break;
                    
                    case 'e' : vu += 5; break;
                    case 'E' : vu -= 5; break;
                    
                    case 'd' : vl -= 5; break;
                    case 'D' : vl += 5; break;
                }

                if (key != -1){
                    printf("H: %i, S: %i, V: %i\nH: %i, S: %i, V: %i\n\n", hu, su, vu, hl, sl, vl);
                }
            
 

			
		}
		cvReleaseImage(&temp);
		cvReleaseImage(&thresholded);
		cvReleaseImage(&hsv_frame);
		cvReleaseImage(&bgr_frame);

		// Stop camera capture
		CLEyeCameraStop(_cam);
		// Destroy camera object
		CLEyeDestroyCamera(_cam);
		// Destroy the allocated OpenCV image (exactly once)
		cvReleaseImage(&pCapImage);
		_cam = NULL;
	}
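Run() assumes it is a member of a capture class holding the camera handle and settings. A hedged sketch of that context, modeled on the CL-Eye SDK samples (the member layout is a guess inferred from the references above):

// Hypothetical class context for Run(), inferred from the member references above.
class CLEyeCameraCapture
{
	CLEyeCameraInstance _cam;
	GUID _cameraGUID;
	CLEyeCameraColorMode _mode;
	CLEyeCameraResolution _resolution;
	float _fps;
	volatile bool _running;
public:
	void Run();                       // the capture loop shown above
	void Stop() { _running = false; } // makes the while(_running) loop exit
};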
Example No. 8
int main(int argc, char* argv[])
{
	// necessary variables..
	int erode_level = 11;
	char key = 'q';
	char s_erode_level[20];
	char img_text[40];	// large enough for "erode level: " plus the digits

	// image container...
	IplImage* img;

	// Font obj for setting text font...
	CvFont font;

	// initialize font object...
	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX,
			1.0, 1.0, 0, 1, CV_AA);



	// starts the window thread...
	// is necessary to destroy window...
	cvStartWindowThread();

	// do...while loop starts here
	do {

		img = cvLoadImage("03.jpg", CV_LOAD_IMAGE_UNCHANGED);
		if (!img) {
			printf("Error: Could not open the image file!\n");
			exit(1);
		}

	// get erode level from user
	printf("Enter the level of eroding: ");
	scanf("%d", &erode_level);

	// clamp the level to the range [1, 119]
	if(erode_level <= 0)
		erode_level = 1;
	else if(erode_level > 119)
		erode_level = 119;



	// erode the image...

	cvErode(img, img, NULL,erode_level);

	/*
	 *
	 * Similarly, dilation can be perfomed...
	 * cvDilate(img,img,NULL,erode_level);
	 *
	 */

	// write erode_level integer to String
	sprintf(s_erode_level, "%d", erode_level) ;

	// Concat "erode Level: " and s_erode_level(actual value)...
	strcpy(img_text,"erode level: ");
	strcat(img_text,s_erode_level);

	// write text to image
		cvPutText(img, img_text, cvPoint(10, 50),
				   &font, cvScalar(255, 255, 255, 0));



	// display image in the "Eroded" window...
		cvNamedWindow("Eroded", CV_WINDOW_AUTOSIZE);
		cvShowImage("Eroded", img);



	//  save to eroded.jpg...
	cvSaveImage("eroded.jpg", img, 0);



	// get input from user
	key = cvWaitKey(0);

	// destroy window and release image...
	cvDestroyWindow("Eroded");
	cvReleaseImage( &img );

	// if 'q' is pressed by user (key)
	//  break out of loop
	//   else continue...
	}while(key!='q');
	// end of do...while loop




	return 0;
}// end of main
Example No. 9
// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
    if(src->nChannels > 1)
    {
        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
                __FILE__, __LINE__);
    }

    if(src->depth != 8)
    {
        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
                __FILE__, __LINE__);
    }

    const int erosion_count = 1;
    const float black_level = 20.f;
    const float white_level = 130.f;
    const float black_white_gap = 70.f;

#if defined(DEBUG_WINDOWS)
    cvNamedWindow("1", 1);
    cvShowImage("1", src);
    cvWaitKey(0);
#endif //DEBUG_WINDOWS

    CvMemStorage* storage = cvCreateMemStorage();

    IplImage* white = cvCloneImage(src);
    IplImage* black = cvCloneImage(src);

    cvErode(white, white, NULL, erosion_count);
    cvDilate(black, black, NULL, erosion_count);
    IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    int result = 0;
    for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
    {
        cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);

#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS

        CvSeq* first = 0;
        std::vector<std::pair<float, int> > quads;
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 1);

        cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);

#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS

        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 0);

        const size_t min_quads_count = size.width*size.height/2;
        std::sort(quads.begin(), quads.end(), less_pred);

        // now check if there are many hypotheses with similar sizes
        // do this by floodfill-style algorithm
        const float size_rel_dev = 0.4f;

        for(size_t i = 0; i < quads.size(); i++)
        {
            size_t j = i + 1;
            for(; j < quads.size(); j++)
            {
                if(quads[j].first/quads[i].first > 1.0f + size_rel_dev)
                {
                    break;
                }
            }

            if(j + 1 > min_quads_count + i)
            {
                // check the number of black and white squares
                std::vector<int> counts;
                countClasses(quads, i, j, counts);
                const int black_count = cvRound(ceil(size.width/2.0)*ceil(size.height/2.0));
                const int white_count = cvRound(floor(size.width/2.0)*floor(size.height/2.0));
                if(counts[0] < black_count*0.75 ||
                        counts[1] < white_count*0.75)
                {
                    continue;
                }
                result = 1;
                break;
            }
        }
    }


    cvReleaseImage(&thresh);
    cvReleaseImage(&white);
    cvReleaseImage(&black);
    cvReleaseMemStorage(&storage);

    return result;
}
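icvGetQuadrangleHypotheses, less_pred and countClasses are helpers from the same source file. A sketch of the first one, consistent with how OpenCV's calib3d module collects (size, class) pairs from contours (treat it as illustrative rather than the exact upstream code):

// Hedged sketch: collect bounding-box sizes of plausible quadrangle contours.
static void icvGetQuadrangleHypotheses(CvSeq* contours,
                                       std::vector<std::pair<float, int> >& quads,
                                       int class_id)
{
    const float min_aspect_ratio = 0.3f;
    const float max_aspect_ratio = 3.0f;
    const float min_box_size = 10.0f;

    for(CvSeq* seq = contours; seq != NULL; seq = seq->h_next)
    {
        CvBox2D box = cvMinAreaRect2(seq);
        float box_size = MAX(box.size.width, box.size.height);
        if(box_size < min_box_size)
            continue;

        float aspect_ratio = box.size.width / MAX(box.size.height, 1);
        if(aspect_ratio < min_aspect_ratio || aspect_ratio > max_aspect_ratio)
            continue;

        quads.push_back(std::pair<float, int>(box_size, class_id));
    }
}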
Example No. 10
void display()
{
	// capture from camera
	IplImage* grabImage = cvRetrieveFrame(capture);
	if(flip)
		cvFlip(grabImage, grabImage);

	cvResize(grabImage, resizeImage);
	cvCvtColor(resizeImage, grayImage, CV_BGR2GRAY);
	cvCopyImage(resizeImage, resultImage);

	// update camera pose
	tracker->UpdateCamerapose(grayImage);
	tracker->DrawDebugInfo(resultImage);
	tracker->DrawOutLine(resultImage, true);
	tracker->GetCameraParameter()->DrawInfomation(resultImage, WIDTH/4);
	
	int matchingCount = tracker->GetMatchingCount();

	// adaptive threshold
	int localcount = tracker->GetDetector()->GetKeypointsCount();
	if(keypointCount != localcount) // if updated
	{
		if(localcount > FEATURE_COUNT)
			threshold += 1;
		if(localcount < FEATURE_COUNT)
			threshold -= 1;
		keypointCount = localcount;
		tracker->GetDetector()->SetThreshold(threshold);
	}

	// calculate fps
    fpsStep++;
    if(fpsStep >= FPS_UPDATE_STEP)
    {
		fps = logging->calculateFPS()*(double)FPS_UPDATE_STEP;
		logging->updateTickCount();
		fpsStep = 0;
    }

	char message[100];
	sprintf_s(message, "FPS : %.2lf", fps);
	windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 20), 0.6, message);
	sprintf_s(message, "Feature Count : %d, Threshold : %.0lf", keypointCount, threshold);
	windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 40), 0.6, message);
	sprintf_s(message, "Matching Count : %d", matchingCount);
	windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 60), 0.6, message);

	sprintf_s(message, "Press 'Space' to track the current image");
	windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-10), 0.5, message);
	sprintf_s(message, "Press 'F' to flip image");
	windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-25), 0.5, message);
	cvShowImage("tracking information window", resultImage);

	// clear screen
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// draw virtual object
	glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

	
	
	double radian = ANGLE * CV_PI / 180.0;
	double dx = sin(radian) * VIRTUAL_CAMERA_DISTANCE;
	double dy = cos(radian) * VIRTUAL_CAMERA_DISTANCE;
	gluLookAt(dx, dy, 2000, 0.0, 0.0, 600.0, 0.0, 0.0, 1.0);

	glPushMatrix();
	{
		// draw reference image & coordinate
		renderer->DrawReference((double)WIDTH, (double)HEIGHT);
		renderer->DrawAxis((double)WIDTH / 4.0);

		// draw camera image & position
		renderer->DrawCamera(tracker->GetCameraParameter(), resizeImage);
	}
	glPopMatrix();

	glutSwapBuffers();
}
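display() reads a number of globals that are declared elsewhere in the sample. A hedged sketch of the ones whose types are evident from the calls above (the windage tracker, renderer and logging objects are omitted because their exact types are not visible here):

// Hypothetical globals inferred from display(); types and values are assumptions.
CvCapture* capture = NULL;        // camera opened during initialization
IplImage* resizeImage = NULL;     // WIDTH x HEIGHT, 3-channel working frame
IplImage* grayImage = NULL;       // WIDTH x HEIGHT, 1-channel tracker input
IplImage* resultImage = NULL;     // annotated frame shown in the info window
bool flip = false;                // toggled by the 'F' key
double threshold = 30.0;          // adaptive feature-detector threshold
int keypointCount = 0;            // last detector keypoint count
int fpsStep = 0;                  // frames since the last FPS update
double fps = 0.0;                 // most recent FPS estimate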
Example No. 11
void MainWindow::stereoVisionTest(QString image_path,int cornersX,int cornersY){
    trace("stereoVisionTest ... ");
    StereoVision* sv = new StereoVision(CAM_WIDTH,CAM_HEIGHT);

    IplImage* images[2];

    //perform calibration based on sets of 2 images (chessboard)
    sv->calibrationStart(cornersX,cornersY);
    // READ IN THE LIST OF CHESSBOARDS:
    QString file_name;
    for(int i=0;;i++){
        for(int lr=0;lr<2;lr++){
            file_name = tr("%1%2%3.jpg").arg(image_path).arg(i).arg(lr ? 'L' : 'R');
            trace(file_name);
            images[lr] = cvLoadImage( file_name.toLatin1(), 0 );
        }
        if(images[0] && images[1]){
            trace(file_name);
            sv->calibrationAddSample(images[0],images[1]);
            for(int lr=0;lr<2;lr++)
                cvReleaseImage(&images[lr]);
        }else{
            break;
        }
    }
    sv->calibrationEnd();
    sv->calibrationSave("stereovisiontest.dat");

    //Once saved, calibration data can be loaded later with:
    //sv->calibrationLoad("stereovisiontest.dat");

    //test our calibration with the first image set:
    CvSize imageSize = sv->getImageSize();
    for(int lr=0;lr<2;lr++){
        QString file_name = tr("%1%2%3.jpg").arg(image_path).arg(0).arg(lr ? 'L' : 'R');
        images[lr] = cvLoadImage( file_name.toLatin1(), 0 );
    }


    //this method will compute  sv->imagesRectified[0],sv->imagesRectified[1],sv->imageDepth,sv->imageDepthNormalized
    sv->stereoProcess(images[0],images[1]);


    //merge 2 rectified images in one image (pair) and display horizontal lines
    //to evaluate rectification.
    CvMat* pair = cvCreateMat( imageSize.height, imageSize.width*2,CV_8UC3 );
    CvMat part;
    cvGetCols( pair, &part, 0, imageSize.width );
    cvCvtColor( sv->imagesRectified[0], &part, CV_GRAY2BGR );
    cvGetCols( pair, &part, imageSize.width,imageSize.width*2 );
    cvCvtColor( sv->imagesRectified[1], &part, CV_GRAY2BGR );
    for(int j = 0; j < imageSize.height; j += 16 )
        cvLine( pair, cvPoint(0,j),cvPoint(imageSize.width*2,j),CV_RGB(0,255,0));


    //display the results
    cvNamedWindow( "rectified", 1 );
    cvShowImage( "rectified", pair );

    cvNamedWindow( "depth", 1 );
    cvShowImage( "depth", sv->imageDepthNormalized);

    //free up memory
    cvReleaseImage(&images[0]);
    cvReleaseImage(&images[1]);
    cvReleaseMat(&pair);
    delete sv;
}
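Given the "%1%2%3.jpg" pattern, a hypothetical call like the one below expects files named 0R.jpg, 0L.jpg, 1R.jpg, 1L.jpg, ... under the given prefix; note that images[0] receives the 'R' frame because lr == 0 selects 'R' in the ternary above:

// Hypothetical usage: loads calib/0R.jpg, calib/0L.jpg, calib/1R.jpg, ...
stereoVisionTest("calib/", 9, 6);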
Example No. 12
int main(void)
{
    /* Video input from the camera */
    CvCapture* input_video;
    input_video = cvCaptureFromCAM(CV_CAP_ANY);
    /* check that a camera is available */
    if (input_video == NULL)
    {
        fprintf(stderr, "Error: Camera not detected.\n");
        return -1;
    }
    /* grab an initial frame from the video */
    cvQueryFrame( input_video );
    /* read the video properties */
    CvSize frame_size;
    frame_size.height = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );
    /* create a new window named "Optical Flow" */
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
    long current_frame = 0;
    while(1)
    {
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
        /* get the next frame */
        frame = cvQueryFrame( input_video );
        if (frame == NULL) break;
        /* allocate the working images */
		allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame1_1C, 0);
		allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1, 0);

		/* get the second frame */
		frame = cvQueryFrame( input_video );
		if (frame == NULL) break;
		allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C, 0);

		/* Shi and Tomasi Feature Tracking! */
		/* allocate the helper images */
		allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
		allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

		/* Preparation: This array will contain the features found in frame 1. */
		CvPoint2D32f frame1_features[400];

		/* initialize the number of features (arrows) to look for */
		int number_of_features;
		number_of_features = 400;

		/* run the Shi and Tomasi algorithm */
		cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL);

		/* Pyramidal Lucas-Kanade Optical Flow! */
		/* these arrays will hold the frame 1 feature locations as found in frame 2 */
		CvPoint2D32f frame2_features[400];
		char optical_flow_found_feature[400];
		float optical_flow_feature_error[400];

		CvSize optical_flow_window = cvSize(3,3);

		CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

		allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
		allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

		/* run Pyramidal Lucas-Kanade optical flow */
		cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
		
		/* draw the arrows */
		for(int i = 0; i < number_of_features; i++)
		{
			/* skip this feature if it was not found */
			if ( optical_flow_found_feature[i] == 0 ) continue;
			int line_thickness; line_thickness = 1;
			/* line color */
			CvScalar line_color; line_color = CV_RGB(255,0,0);
			/* draw the arrow */
			CvPoint p,q;
			p.x = (int) frame1_features[i].x;
			p.y = (int) frame1_features[i].y;
			q.x = (int) frame2_features[i].x;
			q.y = (int) frame2_features[i].y;
			double angle; angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
			double hypotenuse; hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );
			q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
			q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 9 * cos(angle + pi / 4));
			p.y = (int) (q.y + 9 * sin(angle + pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 9 * cos(angle - pi / 4));
			p.y = (int) (q.y + 9 * sin(angle - pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
		}
		/* show the result */
		cvShowImage("Optical Flow", frame1);

		/* exit on ESC */
		int key_pressed;
		key_pressed = cvWaitKey(10);
		if (key_pressed == 27) break;
		current_frame++;
	}
	cvReleaseCapture( &input_video );
	return 0;
}
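allocateOnDemand, square and pi are used above but not shown. Minimal sketches, modeled on the well-known optical flow tutorial this example follows (treat them as assumptions, not the original author's code):

static const double pi = 3.14159265358979323846;

/* square an integer distance component */
static inline double square(int a)
{
	return a * a;
}

/* Allocate an image the first time it is requested; reuse it afterwards. */
static void allocateOnDemand(IplImage **img, CvSize size, int depth, int channels)
{
	if (*img != NULL) return;
	*img = cvCreateImage(size, depth, channels);
	if (*img == NULL)
	{
		fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
		exit(-1);
	}
}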
Example No. 13
void ConformalResizing::ShowConstrains(const IplImage *pBackGround, 
									   const vector<ConstrainUnits>& quads, 
									   const vector<ConstrainUnits>& qaud5s, 
									   const vector<ConstrainUnits>& edges, 
									   const char *winName /* =  */, 
									   const int waite /* = 0 */,
									   const char* saveName /* = NULL */)
{
	IplImage* pMixedImg = cvCloneImage(pBackGround);
	cvNamedWindow(winName);

	// Show quads
	for (size_t i = 0; i < quads.size(); i++)
	{
		CvPoint pnts[4];
		for (int j = 0; j < 4; j++)
		{
			pnts[j].x = (int)(quads[i].pnts[j].x);
			pnts[j].y = (int)(quads[i].pnts[j].y);
		}
		
		cvLineAA(pMixedImg, pnts[0], pnts[1], 255);
		cvLineAA(pMixedImg, pnts[0], pnts[2], 255);
		cvLineAA(pMixedImg, pnts[3], pnts[1], 255);
		cvLineAA(pMixedImg, pnts[3], pnts[2], 255);
	}

	// Show qaud5s
	for (size_t i = 0; i < qaud5s.size(); i++)
	{
		CvPoint pnts[5];
		for (int j = 0; j < 5; j++)
		{
			pnts[j].x = (int)(qaud5s[i].pnts[j].x);
			pnts[j].y = (int)(qaud5s[i].pnts[j].y);
		}

		cvLineAA(pMixedImg, pnts[0], pnts[1], 255);
		cvLineAA(pMixedImg, pnts[0], pnts[2], 255);
		cvLineAA(pMixedImg, pnts[3], pnts[1], 255);
		cvLineAA(pMixedImg, pnts[3], pnts[2], 255);
		cvLineAA(pMixedImg, pnts[0], pnts[4], 128);
		cvLineAA(pMixedImg, pnts[1], pnts[4], 128);
		cvLineAA(pMixedImg, pnts[2], pnts[4], 128);
		cvLineAA(pMixedImg, pnts[3], pnts[4], 128);
	}

	// Show edges
	for (size_t i = 0; i < edges.size(); i++)
	{
		CvScalar color;
		if(ispg)
			color=GenColor1(i);
		else
			color=GenColor(i);
		//swap(color.val[0],color.val[2]);
		for (int j = 0; j < edges[i].n; j++)
		{
			CvPoint point = cvPoint((int)(edges[i].pnts[j].x + 0.5), (int)(edges[i].pnts[j].y + 0.5));
			//cvCircle(pMixedImg, point, 3, CmShow::gColors[i % CM_SHOW_COLOR_NUM], 2);
			cvCircle(pMixedImg, point, 3, color, 2);
			//cvCircle(pMixedImg, point, 3, CmShow::gColors[edges[i].ind[j] % CM_SHOW_COLOR_NUM], 2);
		}
	}

	cvShowImage(winName, pMixedImg);
	if (saveName != NULL)
		cvSaveImage(saveName, pMixedImg);
	cvReleaseImage(&pMixedImg);
}
Example No. 14
CvSize ConformalResizing::GetConstrainUnits(const IplImage* srcImg32F,  
										 const IplImage* img8U3C,
										 const CvSize szGrid, 
										 vector<ConstrainUnits>& quads, 
										 vector<ConstrainUnits>& qaud5s,
										 vector<ConstrainUnits>& edges,
										 vector<double>& ppos,/*added 2009.08.16*/
										 int meshQuadSize)
{

	// Get importance map
	//IplImage* impImg32F = cvCreateImage(cvGetSize(srcImg32F), IPL_DEPTH_32F, 1);
	//cvScale(srcImg32F, impImg32F);

	IplImage* impImg32F = NULL;
	if (strlen(FileNames::impName) > 0)
	{
		IplImage* impMap = cvLoadImage(FileNames::impName, CV_LOAD_IMAGE_GRAYSCALE);
		//if (impMap != NULL)
		{
			NormalizeImg(impMap, meshQuadSize);
			impImg32F = cvCreateImage(cvGetSize(impMap), IPL_DEPTH_32F, 1);
			cvScale(impMap, impImg32F, 1/255.0);
			cvReleaseImage(&impMap);
			cvNamedWindow("Importance");
			cvShowImage("Importance", impImg32F);
			cvAddS(impImg32F, cvScalarAll(gSet("minWeight")), impImg32F);
		}
	}

	CmImportance imp;
	if (impImg32F == NULL)
	{
		double weights[5];
		weights[0] = gSet("edgeWeight");
		weights[1] = gSet("faceWeight");
		weights[2] = gSet("motionWeight");
		weights[3] = gSet("contrastWeight"); 
		weights[4] = gSet("minWeight");

		impImg32F = imp.calcEnergy(img8U3C, weights);
		imp.showEnergy();
	}

	{
		IplImage* impSave = cvCreateImage(cvGetSize(impImg32F), IPL_DEPTH_8U, 1);
		cvScale(impImg32F, impSave, 255);
		//cvSaveImage(FileNames::outImp, impSave);
		cvReleaseImage(&impSave);
	}

//#ifdef _DEBUG
//	cvSave("impd.xml", img8U3C, "impImg32F");
//#else
//	cvSave("imp.xml", img8U3C, "impImg32F");
//#endif // _DEBUG

	//
	IplImage *pGridNodeX64F, *pGridNodeY64F;
	CmCvHelper::MeshGrid(pGridNodeX64F, pGridNodeY64F, 0, srcImg32F->width, 0, srcImg32F->height, meshQuadSize, meshQuadSize);
	double (*pGridPos)[2] = new double[szGrid.width * szGrid.height][2]; //Original edge point position within each grid. (x, y)
	int *pGridIdx = new int[szGrid.width * szGrid.height];  // Index of grid point variable
	int *pGridIdxE = new int[szGrid.width * szGrid.height]; // Index of edge contain this grid point
	typedef vector<pair<int, int>> EdgePos;  // Position of edge point in grid
	vector<EdgePos> edgePntPos;
	int varaInd = (szGrid.height + 1) * (szGrid.width + 1);
	/*added 2009.08.16*/
	{
		ppos.reserve(varaInd*2);
		for(int y=0;y<=szGrid.height;y++)
			for(int x=0;x<=szGrid.width;x++)
			{
				ppos.push_back(x*meshQuadSize);//x
				ppos.push_back(y*meshQuadSize);//y
			}
	}
	{
		//Get Edges
		const IplImage* pLineInd;
		vector<CEdge> edge;
		CDetectEdge detEdge(edge, gSet("Sigma"));
		detEdge.Initial(srcImg32F);
		detEdge.CalFirDer();
		detEdge.NoneMaximalSuppress((float)gSet("LinkEndBound"), (float)gSet("LinkStartBound"));
		detEdge.Link(gSet("ShortRemoveBound"));
		pLineInd = detEdge.LineIdx();

		int* pTmp = pGridIdx;  // Borrow memory inside
		memset(pTmp, 0xff, szGrid.width * szGrid.height * sizeof(int));
		memset(pGridIdxE, 0xff, szGrid.width * szGrid.height * sizeof(int));

		for (int y = 0; y < srcImg32F->height; y++)
		{
			int* lineIdx = (int*)(pLineInd->imageData + pLineInd->widthStep * y); 
			for (int x = 0; x < srcImg32F->width; x++)
			{
				if (lineIdx[x] > 0) // it's an edge point
				{
					int dx = x % meshQuadSize;
					dx = min(dx, meshQuadSize - dx);
					int dy = y % meshQuadSize;
					dy = min(dy, meshQuadSize - dy);
					dx = min(dx, dy);
					int gridPos = y / meshQuadSize * szGrid.width + x / meshQuadSize;

					if (dx > pTmp[gridPos] && dx > gSet("minEdgeRatio") * meshQuadSize)
					{
						pGridPos[gridPos][0] = x;
						pGridPos[gridPos][1] = y;
						pGridIdxE[gridPos] = lineIdx[x];
						pTmp[gridPos] = dx;
					}
				}
			}
		}

		map<int, EdgePos> edgePntPosMap;
		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridPos = y * szGrid.width + x;
				int idx = pGridIdxE[gridPos];
				if (idx > 0) // an edge point within grid
				{
					edgePntPosMap[idx].push_back(pair<int, int>(x, y));
				}
			}
		}

		for (map<int, EdgePos>::iterator it = edgePntPosMap.begin(); it != edgePntPosMap.end(); it++)
		{
			EdgePos& edPos = it->second;
			if (edPos.size() >= 3)
				edgePntPos.push_back(edPos);
			else
			{
				for (size_t i = 0; i < edPos.size(); i++)
					pGridIdxE[edPos[i].first + edPos[i].second * szGrid.width] = -1;
			}
		}

		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridPos = y * szGrid.width + x;
				int idx = pGridIdxE[gridPos];
				if (idx > 0) // an edge point within grid
				{
					pGridIdx[gridPos] = varaInd;
					varaInd++;
					ppos.push_back(pGridPos[gridPos][0]);
					ppos.push_back(pGridPos[gridPos][1]);
				}
				else
					pGridIdx[gridPos] = -1;
			}
		}

		for (size_t i = 0; i < edgePntPos.size(); i++)
		{
			for (size_t j = 0; j < edgePntPos[i].size(); j++)
			{
				int gridPos = edgePntPos[i][j].first + szGrid.width * edgePntPos[i][j].second;
				pGridIdxE[gridPos] = i;
			}
		}

		CmShow::Labels(pLineInd, "Labels", 1); // Show Line Idx
		//CmShow::MixedMesh(img8U3C, pGridNodeX64F, pGridNodeY64F, szGrid, pGridPos, pGridIdxE, "Mixed", 1);
	}

	CvSize szConstrainA = cvSize(varaInd, 0);

	// Get constraint units
	{
		IplImage* gridImp32F = cvCreateImage(szGrid, IPL_DEPTH_32F, 1);
		cvResize(impImg32F, gridImp32F, CV_INTER_AREA/*added 2009-7-27*/);
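		// each pixel of gridImp32F is now the area-averaged importance of one mesh quad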

		double* pNodeX = (double*)(pGridNodeX64F->imageData);
		double* pNodeY = (double*)(pGridNodeY64F->imageData);

		// Quad constraints and quad5 constraints (quads with an interior edge point get a 5th vertex)
		for (int y = 0; y < szGrid.height; y++)
		{
			for (int x = 0; x < szGrid.width; x++)
			{
				int gridpos = szGrid.width * y + x;

				ConstrainUnits unit;
				unit.SetNumber(pGridIdxE[gridpos] >= 0 ? 5 : 4);

				unit.ind[0] = y * (szGrid.width + 1) + x;
				unit.ind[1] = unit.ind[0] + szGrid.width + 1;
				unit.ind[2] = unit.ind[0] + 1;
				unit.ind[3] = unit.ind[0] + szGrid.width + 2;

				if (pGridIdxE[gridpos] >= 0)
				{
					unit.ind[4] = pGridIdx[gridpos];
					unit.pnts[4].x = pGridPos[gridpos][0];
					unit.pnts[4].y = pGridPos[gridpos][1];
				}

				for (int i = 0; i < 4; i++)
				{
					unit.pnts[i].x = pNodeX[unit.ind[i]];
					unit.pnts[i].y = pNodeY[unit.ind[i]];
				}

				unit.imp = CV_IMAGE_ELEM(gridImp32F, float, y, x);
				if (pGridIdxE[gridpos] >= 0)
				{
					//unit.imp *=1.2;
					qaud5s.push_back(unit);					
				}
				else
					quads.push_back(unit);
			}
		}

		szConstrainA.height = quads.size() * 8 + qaud5s.size() * 10;
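		// two rows (one for x, one for y) per constrained point: 4 points for a
		// plain quad, 5 when the quad also pins an interior edge point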

		// Edge constraints
		for (size_t i = 0; i < edgePntPos.size(); i++)
		{
			ConstrainUnits unit;
			unit.SetNumber(edgePntPos[i].size());
			double imp = 0;
			for (size_t j = 0; j < edgePntPos[i].size(); j++)
			{
				int gridPos = edgePntPos[i][j].first + edgePntPos[i][j].second * szGrid.width;
				unit.ind[j] = pGridIdx[gridPos];
				unit.pnts[j].x = pGridPos[gridPos][0];
				unit.pnts[j].y = pGridPos[gridPos][1];
				imp += ((float*)(gridImp32F->imageData))[gridPos];
			}
			unit.imp = imp/unit.n * gSet("EdgeConstrainRation");
			edges.push_back(unit);
			szConstrainA.height += unit.n * 2;
		}

		cvReleaseImage(&gridImp32F);
		ShowConstrains(img8U3C, quads, qaud5s, edges, "Constrains", 1, FileNames::srcMeshName);
	}
	delete []pGridIdxE;
	delete []pGridIdx;
	delete []pGridPos;
	cvReleaseImage(&pGridNodeY64F);
	cvReleaseImage(&pGridNodeX64F);
	//cvReleaseImage(&impImg32F);

	szConstrainA.width *= 2;
	return szConstrainA;
}
Exemplo n.º 15
0
void cvShowManyImages(char* title, int nArgs, ...) {

    // img - Used for getting the arguments 
    IplImage *img;

    // DispImage - the image in which input images are to be copied
    IplImage *DispImage;

    int size;
    int i;
    int m, n;
    int x, y;

    // w - Maximum number of images in a row 
    // h - Maximum number of images in a column 
    int w, h;

    // scale - How much we have to resize the image
    float scale;
    int max;

    // If the number of arguments is outside the range [1, 12],
    // return without displaying
    if(nArgs <= 0) {
        printf("Number of arguments too small....\n");
        return;
    }
    else if(nArgs > 12) {
        printf("Number of arguments too large....\n");
        return;
    }
    // Determine the size of the image, 
    // and the number of rows/cols 
    // from number of arguments 
    else if (nArgs == 1) {
        w = h = 1;
        size = 300;
    }
    else if (nArgs == 2) {
        w = 2; h = 1;
        size = 300;
    }
    else if (nArgs == 3 || nArgs == 4) {
        w = 2; h = 2;
        size = 300;
    }
    else if (nArgs == 5 || nArgs == 6) {
        w = 3; h = 2;
        size = 200;
    }
    else if (nArgs == 7 || nArgs == 8) {
        w = 4; h = 2;
        size = 200;
    }
    else {
        w = 4; h = 3;
        size = 150;
    }

    // Create a new 3 channel image
    DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );

    // Used to get the arguments passed
    va_list args;
    va_start(args, nArgs);

    // Loop for nArgs number of arguments
    for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {

        // Get the Pointer to the IplImage
        img = va_arg(args, IplImage*);

        // Check whether it is NULL or not
        // If it is NULL, release the image, and return
        if(img == 0) {
            printf("Invalid arguments");
            cvReleaseImage(&DispImage);
            return;
        }

        // Find the width and height of the image
        x = img->width;
        y = img->height;

        // Find whether height or width is greater in order to resize the image
        max = (x > y)? x: y;

        // Find the scaling factor to resize the image
        scale = (float) ( (float) max / size );

        // Used to Align the images
        if( i % w == 0 && m!= 20) {
            m = 20;
            n+= 20 + size;
        }

        // Set the image ROI to display the current image
        cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));

        // Resize the input image and copy it into the Single Big Image
        cvResize(img, DispImage);

        // Reset the ROI in order to display the next image
        cvResetImageROI(DispImage);
    }

    // Create a new window, and show the Single Big Image
    cvNamedWindow( title, 1 );
    cvShowImage( title, DispImage);

    cvWaitKey();
    cvDestroyWindow(title);

    // End the number of arguments
    va_end(args);

    // Release the Image Memory
    cvReleaseImage(&DispImage);
}
Exemplo n.º 16
0
int main(int argc, char** argv)
{
    /* A matrix data */
    const float A[] = { 1, 1, 0, 1 };
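    /* with this A, phi_{k+1} = phi_k + delta_phi_k while delta_phi stays constant */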

    IplImage* img = cvCreateImage( cvSize(500,500), 8, 3 );
    CvKalman* kalman = cvCreateKalman( 2, 1, 0 );
    /* state is (phi, delta_phi) - angle and angle increment */
    CvMat* state = cvCreateMat( 2, 1, CV_32FC1 );
    CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 );
    /* only phi (angle) is measured */
    CvMat* measurement = cvCreateMat( 1, 1, CV_32FC1 );
    CvRandState rng;
    int code = -1;

    cvRandInit( &rng, 0, 1, -1, CV_RAND_UNI );

    cvZero( measurement );
    cvNamedWindow( "Kalman", 1 );

    for(;;)
    {
        cvRandSetRange( &rng, 0, 0.1, 0 );
        rng.disttype = CV_RAND_NORMAL;

        cvRand( &rng, state );

        memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
        cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) );
        cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) );
        cvSetIdentity( kalman->error_cov_post, cvRealScalar(1));
        /* choose random initial state */
        cvRand( &rng, kalman->state_post );

        rng.disttype = CV_RAND_NORMAL;

        for(;;)
        {
            #define calc_point(angle)                                      \
                cvPoint( cvRound(img->width/2 + img->width/3*cos(angle)),  \
                         cvRound(img->height/2 - img->width/3*sin(angle)))

            float state_angle = state->data.fl[0];
            CvPoint state_pt = calc_point(state_angle);

            /* predict point position */
            const CvMat* prediction = cvKalmanPredict( kalman, 0 );
            float predict_angle = prediction->data.fl[0];
            CvPoint predict_pt = calc_point(predict_angle);
            float measurement_angle;
            CvPoint measurement_pt;

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->measurement_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, measurement );

            /* generate measurement */
            cvMatMulAdd( kalman->measurement_matrix, state, measurement, measurement );

            measurement_angle = measurement->data.fl[0];
            measurement_pt = calc_point(measurement_angle);

            /* plot points */
            #define draw_cross( center, color, d )                        \
                cvLine( img, cvPoint( center.x - d, center.y - d ),       \
                             cvPoint( center.x + d, center.y + d ),       \
                             color, 1, 0 );                               \
                cvLine( img, cvPoint( center.x + d, center.y - d ),       \
                             cvPoint( center.x - d, center.y + d ),       \
                             color, 1, 0 )

            cvZero( img );
            draw_cross( state_pt, CV_RGB(255,255,255), 3 );
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 );
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 );
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, 0 );

            /* adjust Kalman filter state */
            cvKalmanCorrect( kalman, measurement );

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->process_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, process_noise );
            cvMatMulAdd( kalman->transition_matrix,
                         state,
                         process_noise,
                         state );

            cvShowImage( "Kalman", img );
            code = cvWaitKey( 100 );

            if( code > 0 ) /* break current simulation by pressing a key */
                break;
        }
        if( code == 27 ) /* exit by ESCAPE */
            break;
    }

    return 0;
}
Exemplo n.º 17
0
int main(){
    Tserial *com;
    char ch;
    com = new Tserial();
    com->connect("COM3", 4800, spNONE);
    CvCapture *capture = 0;
    IplImage  *frame = 0;
    int       key = 0;
    com->sendChar('a');
    int rpx,rpy,ryx,ryy,bx,by,rpx1,rpy1,ryx1,ryy1,bx1,by1;
    double theta1=0.1,theta2=0.1;
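    /* Assumption: corp/cory/corr (defined elsewhere in this project) return the
       x (second arg = 1) or y (second arg = 0) centroid of three colour markers,
       and img/img0 are globals shared with those helpers. */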
    /* initialize camera */
    capture = cvCaptureFromCAM(0);
    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1024 );

    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 720 );

    /* always check */
    if ( !capture ) {
        fprintf( stderr, "Cannot open initialize webcam!\n" );
        return 1;
    }
 
    /* create a window for the video */
    cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );
 
    while( key != 'q' ) {
        /* get a frame */
        img = cvQueryFrame( capture );
      
        /* always check */
        if( !img ) break;
        
        img0=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
        cvCvtColor(img,img0,CV_BGR2HSV);
        // cvSetMouseCallback( "image", mouseHandler, img0 );
      // cvThreshold(img0, img0, 85, 255, CV_THRESH_BINARY);
       
        /* display current frame */
        cvShowImage( "image", img0 );
        rpx=corp(img0,1);
        ryx=cory(img0,1);
        bx=corr(img0,1);
        rpy=corp(img0,0);
        ryy=cory(img0,0);
        by=corr(img0,0);
        rpx1=rpx-ryx;
        rpy1=rpy-ryy;
        bx1=bx-ryx;
        by1=by-ryy;
        theta1=atan((double)rpy1/(double)rpx1);
        theta2=atan((double)by1/(double)bx1);
        if(theta1>0 && theta1-theta2>0 && rpx1>0)
        com->sendChar('r');
        else if(theta1<=0 && M_PI+theta1-theta2>0)
        com->sendChar('l');
        else if(theta1<0 && theta2>=0 && rpx<ryx)
        com->sendChar('r');
        else if(theta1>0 && theta1-theta2<0)
        com->sendChar('l');
        else if(theta1>0 && theta1-theta2>0 && rpx1<0)
        com->sendChar('l');
        else if(theta1-theta2==0.0 && rpx1*bx1>0)
        com->sendChar('f');
        else if(theta1-theta2==0.0 && rpx1*bx1<0){
        com->sendChar('r');
        cvWaitKey(5);
        }
        /* exit if user presses 'q' */
        key = cvWaitKey( 1 );
        cvReleaseImage(&img0);
    }


    /* free memory */
    cvDestroyWindow( "image" );
    cvReleaseCapture( &capture );
    com->disconnect();
    return 0;
}
Exemplo n.º 18
0
void show_img(IplImage* img){
	cvNamedWindow("Debug", 1);
	cvShowImage("Debug", img);
	cvWaitKey(0);
	cvDestroyWindow("Debug");
}
Exemplo n.º 19
0
int main(int argc, char* argv[]) {
    // Create parameters - for now default parameters are modified to desired settings
    // TODO: Read in parameters from a file
    MOGParams params;

    // Create a Pixel SOM for learning 
    ImageModel *pImageModel = new ImageModel();

    // Create windows to display images 
    cvNamedWindow("ImageWindow", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("BackgroundWindow", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("ForegroundWindow", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("ForegroundMorph", CV_WINDOW_AUTOSIZE);

    // Temporary variables 
    std::string szFileName;
    IplImage* img = 0;
    IplImage* imgBG = 0;
    IplImage* imgFG = 0;
    IplImage* imgMorph = 0;

    for (int i = 0; i < params.nNumberOfFrames; ++i) {
        printf("Processing frame: %d\n", i);

        // Load the frame from file 
        szFileName = params.GetFileName(i + params.nStartFrameNumber);
        img = cvLoadImage(szFileName.c_str());
        if (!img) {
            printf("Cannot load image file: \n");
            return 0;
        }

        // Create/initialize images & initialize the system
        if (i == 0) {
            imgFG = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
            imgMorph = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
            imgBG = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
            pImageModel->Initialize(img, params);
        }

        // Add the background based on the current image 
        pImageModel->AddBackground(img, params, imgBG, imgFG);

        // Open the foreground image to remove noise and fill gaps 
        cvMorphologyEx(imgFG, imgMorph, NULL, NULL, CV_MOP_OPEN);

        // Show all the images 
        cvShowImage("ImageWindow", img);
        cvShowImage("BackgroundWindow", imgBG);
        cvShowImage("ForegroundWindow", imgFG);
        cvShowImage("ForegroundMorph", imgMorph);

        // Wait 100 ms before processing the next frame
        cvWaitKey(100);

        // Release allocated image 
        cvReleaseImage(&img);
    }

    // Release the allocated temporary images 
    if (imgFG != NULL)
        cvReleaseImage(&imgFG);
    if (imgMorph != NULL)
        cvReleaseImage(&imgMorph);
    if (imgBG != NULL)
        cvReleaseImage(&imgBG);

    // Destroy all the windows 
    cvDestroyWindow("ImageWindow");
    cvDestroyWindow("BackgroundWindow");
    cvDestroyWindow("ForegroundWindow");
    cvDestroyWindow("ForegroundMorph");

    // Return
    return 0;
}
Exemplo n.º 20
0
int main(int argc, char** argv) {
	pthread_t thread_s;
	int key = 0;

	if (argc == 2) {
		capture = cvCaptureFromFile(argv[1]);
	} else {
		capture = cvCaptureFromCAM(0);
	}

	if (!capture) {
		quit("cvCapture failed", 1);
	}

	img0 = cvQueryFrame(capture);
	img1 = cvCreateImage(cvGetSize(img0), IPL_DEPTH_8U, 1);

	cvZero(img1);
	cvNamedWindow("stream_server", CV_WINDOW_AUTOSIZE);

	/* print the width and height of the frame, needed by the client */
	fprintf(stdout, "width: %d\nheight: %d\n\n", img0->width, img0->height);
	fprintf(stdout, "Press 'q' to quit.\n\n");

	/* run the streaming server as a separate thread */
	if (pthread_create(&thread_s, NULL, streamServer, NULL )) {
		quit("pthread_create failed.", 1);
	}

	while (key != 'q') {
		/* get a frame from camera */
		img0 = cvQueryFrame(capture);
		if (!img0)
			break;

		img0->origin = 0;
		//cvFlip(img0, img0, -1);

		/**
		 * convert to grayscale
		 * note that the grayscaled image is the image to be sent to the client
		 * so we enclose it with pthread_mutex_lock to make it thread safe
		 */
		pthread_mutex_lock(&mutex);
		cvCvtColor(img0, img1, CV_BGR2GRAY);
		is_data_ready = 1;
		pthread_mutex_unlock(&mutex);

		/* also display the video here on server */
		cvShowImage("stream_server", img0);
		key = cvWaitKey(30);
	}

	/* user has pressed 'q', terminate the streaming server */
	if (pthread_cancel(thread_s)) {
		quit("pthread_cancel failed.", 1);
	}

	/* free memory */
	cvDestroyWindow("stream_server");
	quit(NULL, 0);
}
Exemplo n.º 21
0
int main(int argc, char **argv)
{
	cascade = (CvHaarClassifierCascade *) cvLoad("cascade.xml", 0, 0, 0);
	IplImage *faceDepth = cvCreateImage(cvSize(100, 100), 8, 1);
	char name[1000];
	int imageCnt=0;
	PCAWrapper pca;

	if (!cascade)
	{
		fprintf(stderr, "ERROR: Could not load classifier cascade\n");
		return -1;
	}

	storage = cvCreateMemStorage(0);

	int key = -1;
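	// Key bindings used below: ESC quits, 'p' runs detect_and_draw with the
	// capture flag set, 'i' registers the current face depth image with the
	// PCA model, 'r' searches for the closest match, 't' trains the model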

	while ((key & 0xFF) != 0x1B)
	{							// Break when ESC is pressed.
		key = cvWaitKey(10);

		IplImage *image = freenect_sync_get_rgb_cv(0);
		if (!image)
		{
			printf("Error: Kinect not connected?\n");
			return -1;
		}

		// DEBUG;
		/* 
		   IplImage *irimage = freenect_sync_get_ir_cv(0); if (!irimage) {
		   printf("Error: Kinect not connected?\n"); return -1; } */

		// DEBUG;

		// cvCvtColor(image, image, CV_RGB2BGR);
		IplImage *depth = freenect_sync_get_depth_cv(0);
		if (!depth)
		{
			printf("Error: Kinect not connected?\n");
			return -1;
		}

		// DEBUG;


		// printf("%d\n", key);

		if ((key & 0xFF) == 'p')
		{
			detect_and_draw(image, depth, faceDepth, true);
		}
		detect_and_draw(image, depth, faceDepth, false);

		if ((key & 0xFF) == 'i')
		{
			imageCnt++;

			sprintf(name, "face%d", imageCnt);

			printf("face %s registered!\n", name);

			pca.insertImage(faceDepth, name);
		}

		if((key & 0xFF) == 'r')
		{
			printf("%s Dist = %f\n", pca.search(faceDepth), pca.searchDist(faceDepth));
		}

		if((key & 0xFF) == 't')
		{
			printf("Training...\n");
			pca.training();
		}

		cvShowImage("RGB", image);

		// DEBUG;
		// cvShowImage("IR", irimage);
		cvShowImage("Depth", depth);
		// cvShowImage("Depth", GlViewColor(depth));
	}
	return 0;
}
Exemplo n.º 22
0
Arquivo: utils.cpp Projeto: cherip/dct
void show_image(IplImage *src, const char *name) {
    cvNamedWindow(name, CV_WINDOW_AUTOSIZE);
    cvShowImage(name, src);
    cvWaitKey(0);
}
Exemplo n.º 23
0
int main (int argc, const char * argv[])
{
	if ( argc != 4 ) {
		fprintf(stderr, "Expected 3 image filenames <no_cup> <with_cup> <outdoor_scene>.\n");
		exit(1);
	}
	
	IplImage* src1 = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
	
	if ( src1 == NULL ) {
		fprintf(stderr, "Couldn't load file 1 %s\n", argv[1]);
		exit(1);
	}
	
	IplImage* src2 = cvLoadImage(argv[2], CV_LOAD_IMAGE_COLOR);
	
	if ( src2 == NULL ) {
		fprintf(stderr, "Couldn't load file 2 %s\n", argv[2]);
		exit(1);
	}
	
	if ( src1->width != src2->width || src1->height != src2->height ) {
		fprintf(stderr, "2 images should have the same width and height but they don't.\n");
		exit(1);
	}
	
	IplImage* grayscale1 = cvCreateImage(cvSize(src1->width, src1->height), src1->depth, 1);
	IplImage* grayscale2 = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1);
	
	cvCvtColor(src1, grayscale1, CV_BGR2GRAY);
	cvCvtColor(src2, grayscale2, CV_BGR2GRAY);
	
	IplImage* diff = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1);
	cvAbsDiff(grayscale1, grayscale2, diff);
	
	IplImage* result = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1);
	cvThreshold(diff, result, 40, 255, CV_THRESH_BINARY);
	
	IplImage* mopResult = cvCreateImage(cvSize(diff->width, diff->height), diff->depth, 1);
	cvMorphologyEx(result, mopResult, NULL, NULL, CV_MOP_OPEN, 1);
	
	// start of exercise 5.6 specific code (the above is from 5.5).
	
	CvConnectedComp largestComponent;
	bzero(&largestComponent, sizeof(largestComponent));
	CvPoint largestRegionPoint = cvPoint(-1, -1);
	
	int x, y;
	for(y = 0; y < mopResult->height; ++y)
	{
		unsigned char* row = (unsigned char*)(mopResult->imageData + mopResult->widthStep * y);
		
		for(x = 0; x < mopResult->width; ++x)
		{
			if ( row[x] == 255 )
			{
				CvConnectedComp region;
				cvFloodFill(mopResult, cvPoint(x, y), cvScalarAll(100), cvScalarAll(0), cvScalarAll(0), &region, 4, NULL);
				
				if ( largestRegionPoint.x == -1 )
				{
					// first region found
					largestRegionPoint = cvPoint(x, y);
					largestComponent = region;
				}
				else if ( largestComponent.area < region.area )
				{
					// Clear the last area.
					cvFloodFill(mopResult, largestRegionPoint, cvScalarAll(0), cvScalarAll(0), cvScalarAll(0), NULL, 4, NULL);
					largestRegionPoint = cvPoint(x, y);
					largestComponent = region;
				}
				else
				{
					cvFloodFill(mopResult, cvPoint(x, y), cvScalarAll(0), cvScalarAll(0), cvScalarAll(0), NULL, 4, NULL);
				}
			}
		}
	}
	
	if ( largestRegionPoint.x != -1 )
	{
		cvFloodFill(mopResult, largestRegionPoint, cvScalarAll(255), cvScalarAll(0), cvScalarAll(0), NULL, 4, NULL);
	}
	
	// Start of Exercise 5.7. Before this is the same as 5.6
	
	IplImage* outdoorScene = cvLoadImage(argv[3], CV_LOAD_IMAGE_COLOR);
	
	if ( outdoorScene == NULL ) {
		fprintf(stderr, "Couldn't load file 3 %s\n", argv[3]);
		exit(1);
	}
	
	cvCopy(src2, outdoorScene, mopResult);
	
	cvNamedWindow("outdoorScene", CV_WINDOW_NORMAL);
	cvShowImage("outdoorScene", outdoorScene);
	cvWaitKey(0);
	
    return 0;
}
Exemplo n.º 24
0
int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int moddiv=2,seq=0,seqdiv=2;
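  // seq steps through seqdiv+4 phases per cycle: the bottom ROI blob is found
  // at seq==0, the top ROI blob at seq==seqdiv, and the overlay is drawn at
  // seq==seqdiv+2; the +100 added to the bottom points below undoes the
  // rectROIbot y-origin (50) after scaling back up by moddiv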
  int release=0, rmax=100;
  int modfheight, modfwidth;
  unsigned char sendBuf;
   CvCapture* capture = cvCaptureFromCAM( -1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
      
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)

      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(100, 50, 50),cvScalar(150, 110, 110),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
    if (seq==0) {
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame, output frame, kernel (NULL = 3x3 rect), iterations)

   //  tempwidth=cvGetSize(dilframebot).width;
   //  tempheight=cvGetSize(dilframebot).height;
   //  printf("dilframe: %d, %d \n",tempwidth,tempheight);
     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Remove all blobs whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////////
    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame, output frame, kernel (NULL = 3x3 rect), iterations)
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Remove all blobs whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////
   if(seq==seqdiv+2) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
     }
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("%d, %d, %d, %d\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     }
		
}
    seq++;
    seq=seq%(seqdiv+4);
     cvShowImage( "mywindow", frame); // show output image
//     cvShowImage( "bot", threshframebot);
//     cvShowImage( "top", threshframetop);
/*   cvReleaseImage(&frame);
   cvReleaseImage(&threshframe);
   cvReleaseImage(&hsvframe);
   cvReleaseImage(&threshframebot);
   cvReleaseImage(&modframe);
   cvReleaseImage(&dilframetop);
   cvReleaseImage(&dilframebot);*/
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27  ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   //v4l.flush();
   cvDestroyWindow( "mywindow" );
   
return 0;
 }
Exemplo n.º 25
0
int main(int argc, char** argv )
{
	IplImage *img, *filterMask = NULL;
	CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
	ASDFrameSequencer *sequencer;
	CvFont base_font;
	char caption[2048], s[256], windowName[256];
	long int clockTotal = 0, numFrames = 0;
	std::clock_t clock;

	if (argc < 4)
	{
		help(argv);
		sequencer = new ASDFrameSequencerWebCam();
		(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);

		if (! sequencer->isOpen())
		{
			std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;
		}
	}
	else
	{
		sequencer = new ASDFrameSequencerImageFile();
		(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here

	}
	std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");

	cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
	cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);

	// Usage:
	//		c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000

	std::cout << "Press ESC to stop." << std::endl << std::endl;
	while ((img = sequencer->getNextImage()) != 0)
	{
		numFrames++;

		if (filterMask == NULL)
		{
			filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);
		}
		clock = std::clock();
		filter.process(img, filterMask);	// DETECT SKIN
		clockTotal += (std::clock() - clock);

		displayBuffer(img, filterMask, 0, 255, 0);

		sequencer->getFrameCaption(caption);
		std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);
		putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);

		std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);
		putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);

		cvShowImage (windowName, img);
		cvReleaseImage(&img);

		if (cvWaitKey(1) == 27)
			break;
	}

	sequencer->close();
	delete sequencer;

	cvReleaseImage(&filterMask);

	cvDestroyWindow(windowName);

	std::cout << "Finished, " << numFrames << " frames processed." << std::endl;

	return 0;
}
Exemplo n.º 26
0
IplImage *
camera_control_query_frame(CameraControl* cc,
        PSMove_timestamp *ts_grab, PSMove_timestamp *ts_retrieve)
{
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#elif defined(CAMERA_CONTROL_USE_PS3EYE_DRIVER)
    int stride = 0;
    unsigned char *pixels = ps3eye_grab_frame(cc->eye, &stride);

    // Convert pixels from camera to BGR
    unsigned char *cvpixels;
    cvGetRawData(cc->framebgr, &cvpixels, 0, 0);
    yuv422_to_bgr(pixels, stride, cvpixels, cc->width, cc->height);

    result = cc->framebgr;
#else
    cvGrabFrame(cc->capture);
    if (ts_grab != NULL) {
        *ts_grab = _psmove_timestamp();
    }
    result = cvRetrieveFrame(cc->capture, 0);
    if (ts_retrieve != NULL) {
        *ts_retrieve = _psmove_timestamp();
    }
#endif

    if (cc->deinterlace == PSMove_True) {
        /**
         * Dirty hack follows:
         *  - Clone image
         *  - Hack internal variables to make an image of all odd lines
         **/
        IplImage *tmp = cvCloneImage(result);
        tmp->imageData += tmp->widthStep; // odd lines
        tmp->widthStep *= 2;
        tmp->height /= 2;

        /**
         * Use nearest-neighbor to be faster. In my tests, this does not
         * cause a speed disadvantage, and tracking quality is still good.
         *
         * This will scale the half-height image "tmp" to the original frame
         * size by doubling lines (so we can still do normal circle tracking).
         **/
        cvResize(tmp, result, CV_INTER_NN);

        /**
         * Need to revert changes in tmp from above, otherwise the call
         * to cvReleaseImage would cause a crash.
         **/
        tmp->height = result->height;
        tmp->widthStep = result->widthStep;
        tmp->imageData -= tmp->widthStep; // odd lines
        cvReleaseImage(&tmp);
    }

    // undistort image
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }


#if defined(CAMERA_CONTROL_DEBUG_CAPTURED_IMAGE)
    cvShowImage("camera input", result);
    cvWaitKey(1);
#endif

    return result;
}
Exemplo n.º 27
0
int main(int argc, char* argv[]) {
  
  CvCapture* capture;// = cvCreateCameraCapture( 0 );
 // assert( capture );

	if(argc != 4){
		help();
		return -1;
	}
	help();
	board_w = atoi(argv[1]);
	board_h = atoi(argv[2]);
	int board_n  = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );
	FILE *fptr = fopen(argv[3],"r");
	char names[2048];
	//COUNT THE NUMBER OF IMAGES:
	while(fscanf(fptr,"%s ",names)==1){
		n_boards++;
	}
	rewind(fptr);

  cvNamedWindow( "Calibration" );
  //ALLOCATE STORAGE
  CvMat* image_points      = cvCreateMat(n_boards*board_n,2,CV_32FC1);
  CvMat* object_points     = cvCreateMat(n_boards*board_n,3,CV_32FC1);
  CvMat* point_counts      = cvCreateMat(n_boards,1,CV_32SC1);

///  CvMat * image_points	= cvCreateMat(1, n_boards*board_n, CV_32FC2);
///  CvMat * object_points = cvCreateMat(1, n_boards*board_n, CV_32FC3);
///  CvMat * point_counts  = cvCreateMat(1, n_boards, CV_32SC1);
  
  CvMat* intrinsic_matrix  = cvCreateMat(3,3,CV_32FC1);
  CvMat* distortion_coeffs = cvCreateMat(4,1,CV_32FC1);
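  // 3x3 intrinsics (focal lengths and principal point) plus OpenCV's 4-element
  // distortion vector (k1, k2, p1, p2)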


  IplImage* image = 0;// = cvQueryFrame( capture );
  IplImage* gray_image = 0; //for subpixel
  CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
  int corner_count;
  int successes = 0;
  int step;

  for( int frame=0; frame<n_boards; frame++ ) {
	  fscanf(fptr,"%s ",names);

	  if(image){
		  cvReleaseImage(&image);
		  image = 0;
	  }
	  image = cvLoadImage( names);
	  if(gray_image == 0  && image) //We'll need this for subpixel accurate stuff
		  gray_image = cvCreateImage(cvGetSize(image),8,1);

	  if(!image)
		  printf("null image\n");

      int found = cvFindChessboardCorners(
        image,
        board_sz,
        corners,
        &corner_count, 
        CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
      );

	  //Get Subpixel accuracy on those corners
	  cvCvtColor(image, gray_image, CV_BGR2GRAY);
	  cvFindCornerSubPix(gray_image, corners, corner_count, 
			  cvSize(11,11),cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
	  //Draw it

	  cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
      cvShowImage( "Calibration", image );

      // If we got a good board, add it to our data
      //
      if( corner_count == board_n ) {
		  step = successes*board_n;
//	printf("Found = %d for %s\n",found,names);
        for( int i=step, j=0; j<board_n; ++i,++j ) {
 ///         CV_MAT_ELEM(*image_points, CvPoint2D32f,0,i) = cvPoint2D32f(corners[j].x,corners[j].y);
 ///         CV_MAT_ELEM(*object_points,CvPoint3D32f,0,i) = cvPoint3D32f(j/board_w, j%board_w, 0.0f);
          CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
          CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
          CV_MAT_ELEM(*object_points,float,i,0) = j/board_w;
          CV_MAT_ELEM(*object_points,float,i,1) = j%board_w;
          CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
 		
		}
//        CV_MAT_ELEM(*point_counts, int,0,successes) = board_n;
         CV_MAT_ELEM(*point_counts, int,successes,0) = board_n;		
       successes++;
      }
      
 //    if( successes == n_boards ) break;

    int c = cvWaitKey(15);
	if(c == 'p') {
		c = 0;
		while(c != 'p' && c != 27){
			c = cvWaitKey(250);
		}
	}
	if(c == 27)
		return 0;
  }
Exemplo n.º 28
0
/*
 * thread for displaying the opencv content
 */
void *cv_threadfunc (void *ptr) {
	IplImage* timg = cvCloneImage(rgbimg); // Image we do our processing on
	IplImage* dimg = cvCloneImage(rgbimg); // Image we draw on
	CvSize sz = cvSize( timg->width & -2, timg->height & -2);
	IplImage* outimg = cvCreateImage(sz, 8, 3);

	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* squares; // Sequence for squares - sets of 4 points
	CvSeq* contours; // Raw contours list
	CvSeq* result; // Single contour being processed

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage *pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);

	// Set region of interest
	cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));
	cvSetImageROI(dimg, cvRect(0, 0, sz.width, sz.height));

	// Processing and contours
	while (1) {
		squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

		pthread_mutex_lock( &mutex_rgb );
		cvCopy(rgbimg, dimg, 0);
		cvCopy(rgbimg, timg, 0);
		pthread_mutex_unlock( &mutex_rgb );

		// BLUR TEST
		// cvPyrDown(dimg, pyr, 7);
		// cvPyrUp(pyr, timg, 7);

		// DILATE TEST: grow bright regions, then erode to trim them back;
		// release the kernels each pass so they are not leaked every iteration
		IplConvKernel* element = cvCreateStructuringElementEx(5, 5, 2, 2, 0);
		IplConvKernel* element2 = cvCreateStructuringElementEx(3, 3, 1, 1, 0);
		cvDilate(timg, timg, element, 2);
		cvErode(timg, timg, element2, 3);
		cvReleaseStructuringElement(&element);
		cvReleaseStructuringElement(&element2);

		// THRESHOLD TEST 
		cvThreshold(timg, timg, 200, 255, CV_THRESH_BINARY);

		// Output processed or raw image.
		cvCvtColor(timg, outimg, CV_GRAY2BGR);

		// BLOB TEST
		blobs = CBlobResult( timg, (IplImage*)NULL, 0, true );
		// blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 50 );
		
		printf("Blobs: %d\n", blobs.GetNumBlobs());

		CBlob biggestBlob;
		blobs.GetNthBlob( CBlobGetArea(), 1, biggestBlob );
		biggestBlob.FillBlob( outimg, CV_RGB(255, 0, 0) );
		CvSeq* dest;
		biggestBlob.GetConvexHull(dest);
		
		// for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		// {
		// 	currentBlob = blobs.GetBlob(i);
		// 	currentBlob->FillBlob( outimg, CV_RGB(255,0,0) );
		// }
		

//		// CONTOUR FINDING
//		cvFindContours(timg, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
//
//		while (contours)
//		{
//			// Approximate contour, accuracy proportional to perimeter of contour; may want to tune accuracy.
//			result = cvApproxPoly(contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);
//			// Filter small contours and contours w/o 4 vertices (filters noise, finds rectangles)
//			if (result->total == 4 && 
//				fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 600 && 
//				cvCheckContourConvexity(result))
//			{
//				// Skipped checking whether angles were close to 90 degrees here; may want to implement.
//				// Probably also want to check if it's square enough to filter out ex. long windows.
//
//				for (int i = 0; i < 4; i++)
//				{
//					// Write vertices to output sequence
//					cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
//				}
//			}
//
//			// Take next contour
//			contours = contours->h_next;
//		}
//
//
//		// DRAW RECTANGLES
//		CvSeqReader reader;
//		cvStartReadSeq(squares, &reader, 0);
//
//		// Read 4 points at a time
//		CvPoint pt[4];
//		CvPoint *rect = pt;
//		CvRect out[4];
//		CvRect *outrect = out;
//		for (int i = 0; i < squares->total; i += 4)
//		{
//			int count = 4;
//			
//			// Which point is which corner is unpredictable.
//			CV_READ_SEQ_ELEM(pt[0], reader); 
//			CV_READ_SEQ_ELEM(pt[1], reader);
//			CV_READ_SEQ_ELEM(pt[2], reader);
//			CV_READ_SEQ_ELEM(pt[3], reader);
//			// Draw rectangle on output
//			cvPolyLine(outimg, &rect, &count, 1, 1, CV_RGB(0,255,0), 1, CV_AA, 0);
//			// Make rectangles
//			// Print (temporary)
//			printf("Rect[0]: %d, %d\n", pt[0].x, pt[0].y);
//			printf("Rect[1]: %d, %d\n", pt[1].x, pt[1].y);
//			printf("Rect[2]: %d, %d\n", pt[2].x, pt[2].y);
//			printf("Rect[3]: %d, %d\n\n", pt[3].x, pt[3].y);
//			fflush(stdout);
//
//		}
//
		// Pump HighGUI events between frames (the ESC branch is left empty)
		if( cvWaitKey( 15 ) == 27 )
		{
		}

		cvShowImage (FREENECTOPENCV_WINDOW_N,outimg);
		cvClearMemStorage(storage);
	}
	pthread_exit(NULL);
}
Exemplo n.º 29
0
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	CvSize size;
	size.height = 300; size.width = 200;
	IplImage *corrected_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );
	IplImage *labelled_image=NULL;
	IplImage *vertical_edge_image=NULL;
    int user_clicked_key=0;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( "./Postboxes.avi" );
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	CvPoint2D32f from_points[4] = { {3, 6}, {221, 11}, {206, 368}, {18, 373} };
	CvPoint2D32f to_points[4] = { {0, 0}, {200, 0}, {200, 300}, {0, 300} };
	CvMat* warp_matrix = cvCreateMat( 3,3,CV_32FC1 );
	cvGetPerspectiveTransform( from_points, to_points, warp_matrix );
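	// maps the quadrilateral of the region of interest in the source frame onto
	// an upright 200x300 rectangle, so its edges become vertical after warping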

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
	cvNamedWindow( "Vertical edges", 0 );
    cvNamedWindow( "Results", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
		image_for_on_mouse_show_values=current_frame; // Assign image for mouse callback
        if( !current_frame ) // No new frame available
			break;

		cvWarpPerspective( current_frame, corrected_frame, warp_matrix );

		if (labelled_image == NULL)
		{	// The first time around the loop create the image for processing
			labelled_image = cvCloneImage( corrected_frame );
			vertical_edge_image = cvCloneImage( corrected_frame );
		}
		check_postboxes( corrected_frame, labelled_image, vertical_edge_image );

		// Display the current frame and results of processing
        cvShowImage( "Input video", current_frame );
        cvShowImage( "Vertical edges", vertical_edge_image );
        cvShowImage( "Results", labelled_image );
        
        // Wait for the delay between frames
        user_clicked_key = cvWaitKey( 1000 / fps );
		if (user_clicked_key == ' ')
		{
			user_clicked_key = cvWaitKey(0);
		}
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Input video" );
    cvDestroyWindow( "Vertical edges" );
    cvDestroyWindow( "Results" );
 
    return 0;
}
Exemplo n.º 30
0
void showDst(){
	cvShowImage("dst",dst);
}