Example #1
0
File: main.cpp  Project: syalo/RPGgame
// Minesweeper-style game loop using the legacy OpenCV C API.
// Draws the board into a 32-bit float image, converts it to 8-bit for
// display, and polls mouse clicks (delivered by the on_mouse callback
// registered below) once per frame.  ESC exits.
int main(int argc,char* argv[])
{
	int w, h;
	int board[12][12];	// per-cell shown/hidden flag array
	int mask[12][12];	// per-cell contents; -2 marks a mine (see the check below)

	int mine_sum = 10;	// number of mines to place
	IplImage *img,*img_out;
	int point[2];		// user data handed to the mouse callback
	bool clear_flag = false;	// set once clear_check() reports the board solved
	bool false_flag = false;	// set once a mine has been uncovered

	if((img = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_32F, 3)) == NULL) {
		printf("画像リソースを確保できません。\n");
		return -1;
	}
	
	w = WIDTH, h = HEIGHT;
	img_out = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);	// 8-bit image used for display

	cvNamedWindow("mine_s");
	cvSetMouseCallback("mine_s", on_mouse, point);
	cvSet(img, cvScalar(0,0,0), 0);	// clear the float image to black
	
	img_set(img,h,w);		// paint the board background into img
	map_set(board);			// initialize board
	map_set(mask);			// initialize mask
	mine_set(mask,mine_sum);// place the mines
	mask_set(mask);

	while(1){
			// handle a left click (get_Lpoint() returns (-1,-1) when none pending)
			if(get_Lpoint().x != -1 && get_Lpoint().y != -1){
				L_button_reverse(get_Lpoint().x,get_Lpoint().y,board,mask);
				character(img,mask,board);
				// check whether a mine was stepped on
				// NOTE(review): the x index uses x/40-1 while y uses y/40 —
				// this asymmetry looks suspicious; confirm the board offset.
				if(mask[get_Lpoint().y/40][get_Lpoint().x/40-1] == -2){
					false_flag = true;
				}
				set_Lpoint(-1,-1);	// consume the click
			}
			// game-clear check
			clear_flag = clear_check(board,mask,mine_sum);
			// handle a right click (flag placement)
			// NOTE(review): unlike the left click, the right-click point is
			// never reset to (-1,-1) here — verify this is intentional.
			if(get_Rpoint().x != -1 && get_Rpoint().y != -1){
				R_button_reverse(get_Rpoint().x,get_Rpoint().y,board);
				exclamation_draw(img,board);
			}

		if(clear_flag){
			printf("GameClear\n");
			//break;
		}
		if(false_flag){
			printf("GameOver\n");
			//break;
		}
		cvConvertImage(img, img_out);	// convert float image to 8-bit
		cvShowImage("mine_s", img_out);			// display
		if(cvWaitKey(33) == 27) break;		// exit when ESC is pressed
	
	}
	// release windows and image resources
	cvDestroyAllWindows();
	cvReleaseImage( &img_out);
 	cvReleaseImage( &img);
	return 0;
}
// Train an OpenCV random forest (CvRTrees) on the given samples.
//
// pfeatures: one feature vector per sample (rows assumed equal length).
// plabels:   per-sample integer labels; -1 is counted as "merge" below, so
//            labels are presumably in {-1, +1} — TODO confirm with callers.
//
// Side effects: replaces any previously trained forest in _rf, refills
// _trees with (non-owning) pointers to the forest's trees, resets
// _tree_weights to uniform, and prints timing plus training-set accuracy.
void OpencvRFclassifier::learn(std::vector< std::vector<float> >& pfeatures, std::vector<int>& plabels){

     // Drop any previously trained forest.  _trees holds pointers owned by
     // the old forest, so they must be cleared together with it.
     if (_rf){
	delete _rf;
	_trees.clear();
	_tree_weights.clear();
     }
     _rf = new CvRTrees;

     int rows = pfeatures.size();
     // FIX: the original read pfeatures[0] before checking rows, which is
     // undefined behavior when pfeatures is empty.
     int cols = rows > 0 ? (int)pfeatures[0].size() : 0;

     printf("Number of samples and dimensions: %d, %d\n",rows, cols);
     if ((rows<1)||(cols<1)){
	return;
     }

     std::time_t start, end;
     std::time(&start);

     // Repack the samples into CvMat form: features is rows x cols; labels
     // is a single column, stored as float as the CvRTrees API expects.
     CvMat *features = cvCreateMat(rows, cols, CV_32F);
     CvMat *labels = cvCreateMat(rows, 1 , CV_32F);
     float* datap = features->data.fl;
     float* labelp = labels->data.fl;

     int numzeros=0;
     for(int i=0; i < rows; i++){
	 labelp[i] = plabels[i];
	 numzeros += ( labelp[i] == -1? 1 : 0 );	// count the -1 ("merge") labels
	 for(int j=0; j < cols ; j++){
	     datap[i*cols+j]  = (float)pfeatures[i][j];
	 }
     }
     printf("Number of merge: %d\n",numzeros);

     // 1. create the type mask: every feature is numerical; the response
     // (appended as the final entry) is categorical.
     CvMat* var_type = cvCreateMat( features->cols + 1, 1, CV_8U );
     cvSet( var_type, cvScalarAll(CV_VAR_NUMERICAL) );
     cvSetReal1D( var_type, features->cols, CV_VAR_CATEGORICAL );

     // 2. define the parameters for training the random forest
     float priors[] = {1,1};  // equal weights for both classes

     CvRTParams params = CvRTParams( _max_depth, // max depth of each tree
                                     10, // min sample count at a leaf for it to be split
                                     0, // regression accuracy: N/A here
                                     false, // compute surrogate split, no missing data
                                     15, // max number of categories (use sub-optimal algorithm for larger numbers)
                                     priors, // the array of priors for each class
                                     false,  // calculate variable importance
                                     5,       // number of variables randomly selected at each node
                                     _tree_count,	 // max number of trees in the forest
                                     0.001f, // forest accuracy
                                     CV_TERMCRIT_ITER // termination criteria
                                      );

     // 3. train classifier
     _rf->train( features, CV_ROW_SAMPLE, labels, 0, 0, var_type, 0, params);

     // Measure accuracy on the training set itself (optimistic estimate).
     float correct = 0;
     for(int  i = 0; i < features->rows ; i++ ){
         float r;
         CvMat sample;
         cvGetRow( features, &sample, i );

         r = _rf->predict_prob( &sample );
	 r = (r>0.5)? 1 :-1;	// map probability back into the {-1,+1} label space
         r = fabs((float)r - labels->data.fl[i]) <= FLT_EPSILON ? 1 : 0;

	 correct += r;
      }

    std::time(&end);
    printf("Time required to learn RF: %.2f sec\n", (difftime(end,start))*1.0);
    printf("with training set accuracy :%.3f\n", correct/features->rows*100.);

    // Cache the trained trees (still owned by _rf) and give them uniform weights.
    _tree_count = _rf->get_tree_count();
    for(int i = 0; i < _tree_count; i++){
	CvForestTree* treep = _rf->get_tree(i);
	_trees.push_back(treep);
    }
    _tree_weights.resize(_tree_count, 1.0/_tree_count);

     cvReleaseMat( &features );
     cvReleaseMat( &labels );
     cvReleaseMat( &var_type );
}
//--------------------------------------------------------------------------------
void ofxCvColorImage::set(int value){
    cvSet(cvImage, cvScalar(value, value, value));
}
Example #4
0
//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
{
 	IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskAVG = 0,*ImaskAVGCC = 0;
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

	int startcapture = 1;
	int endcapture = 30;
	int c,n;

	maxMod[0] = 3;  //Set color thresholds to default values
	minMod[0] = 10;
	maxMod[1] = 1;
	minMod[1] = 1;
	maxMod[2] = 1;
	minMod[2] = 1;
	float scalehigh = HIGH_SCALE_NUM;
	float scalelow = LOW_SCALE_NUM;

	if(argc < 3) {
		printf("ERROR: Too few parameters\n");
		help();
	}else{
		if(argc == 3){
			printf("Capture from Camera\n");
			capture = cvCaptureFromCAM( 0 );
		}
		else {
			printf("Capture from file %s\n",argv[3]);
	//		capture = cvCaptureFromFile( argv[3] );
			capture = cvCreateFileCapture( argv[3] );
			if(!capture) { printf("Couldn't open %s\n",argv[3]); return -1;}

            minMod = {42, 24, 33};
            maxMod = {14, 3, 2};
		}
		if(isdigit(argv[1][0])) { //Start from of background capture
			startcapture = atoi(argv[1]);
			printf("startcapture = %d\n",startcapture);
		}
		if(isdigit(argv[2][0])) { //End frame of background capture
			endcapture = atoi(argv[2]);
			printf("endcapture = %d\n",endcapture);
		}

		if(argc > 4){ //See if parameters are set from command line
			//FOR AVG MODEL
			if(argc >= 5){
				if(isdigit(argv[4][0])){
					scalehigh = (float)atoi(argv[4]);
				}
			}
			if(argc >= 6){
				if(isdigit(argv[5][0])){
					scalelow = (float)atoi(argv[5]);
				}
			}
			//FOR CODEBOOK MODEL, CHANNEL 0
			if(argc >= 7){
				if(isdigit(argv[6][0])){
					maxMod[0] = atoi(argv[6]);
				}
			}
			if(argc >= 8){
				if(isdigit(argv[7][0])){
					minMod[0] = atoi(argv[7]);
				}
			}
			//Channel 1
			if(argc >= 9){
				if(isdigit(argv[8][0])){
					maxMod[1] = atoi(argv[8]);
				}
			}
			if(argc >= 10){
				if(isdigit(argv[9][0])){
					minMod[1] = atoi(argv[9]);
				}
			}
			//Channel 2
			if(argc >= 11){
				if(isdigit(argv[10][0])){
					maxMod[2] = atoi(argv[10]);
				}
			}
			if(argc >= 12){
				if(isdigit(argv[11][0])){
					minMod[2] = atoi(argv[11]);
				}
			}

		}
	}

    /*dancer jiwei*/
    double vdfps = 0.0;
    CvSize vdsize = cvSize(0,0);
    //vdfps = cvGetCaptureProperty ( capture, CV_CAP_PROP_FPS);
    getVideoInfo( capture, vdfps, vdsize);
    CvVideoWriter* writer = cvCreateVideoWriter( "dancer.avi",
                                                CV_FOURCC('D','X','5','0'),
                                                vdfps,
                                                vdsize);
    //end dancer jiwei


	//MAIN PROCESSING LOOP:
	bool pause = false;
	bool singlestep = false;

    if( capture )
    {
      cvNamedWindow( "Raw", 1 );
		cvNamedWindow( "AVG_ConnectComp",1);
		cvNamedWindow( "ForegroundCodeBook",1);
		cvNamedWindow( "CodeBook_ConnectComp",1);
 		cvNamedWindow( "ForegroundAVG",1);
 		//Only dancer jiwei 2012.3.3
 		cvNamedWindow( "OnlyDancer",1);
        cvNamedWindow( "RectDancer",1);
        int i = -1;

        for(;;)
        {
    			if(!pause){
//        		if( !cvGrabFrame( capture ))
//                	break;
//            	rawImage = cvRetrieveFrame( capture );
				rawImage = cvQueryFrame( capture );
				++i;//count it
//				printf("%d\n",i);
				if(!rawImage)
					break;
				//REMOVE THIS FOR GENERAL OPERATION, JUST A CONVIENIENCE WHEN RUNNING WITH THE SMALL tree.avi file
				//if(i == 56){
				if(i==0){
					pause = 1;
					printf("\n\nVideo paused for your convienience at frame 50 to work with demo\n"
					"You may adjust parameters, single step or continue running\n\n");
					help();
				}
			}
			if(singlestep){
				pause = true;
			}
			//First time:
			if(0 == i) {
				printf("\n . . . wait for it . . .\n"); //Just in case you wonder why the image is white at first
				//AVG METHOD ALLOCATION
				AllocateImages(rawImage);
				scaleHigh(scalehigh);
				scaleLow(scalelow);
				ImaskAVG = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskAVGCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskAVG,cvScalar(255));
				//CODEBOOK METHOD ALLOCATION:
				yuvImage = cvCloneImage(rawImage);
				ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskCodeBook,cvScalar(255));
				imageLen = rawImage->width*rawImage->height;
				cB = new codeBook [imageLen];
				for(int f = 0; f<imageLen; f++)
				{
 					cB[f].numEntries = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10; //Learning bounds factor
				}
				ch[0] = true; //Allow threshold setting simultaneously for all channels
				ch[1] = true;
				ch[2] = true;
			}
			//If we've got an rawImage and are good to go:
        	if( rawImage )
        	{
				cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
				//This is where we build our background model
				if( !pause && i >= startcapture && i < endcapture  ){
					//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
					accumulateBackground(rawImage);
					//LEARNING THE CODEBOOK BACKGROUND
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
						pColor += 3;
					}
				}
				//When done, create the background model
				if(i == endcapture){
					createModelsfromStats();
				}
				//Find the foreground if any
				if(i >= endcapture) {
					//FIND FOREGROUND BY AVG METHOD:
					backgroundDiff(rawImage,ImaskAVG);
					cvCopy(ImaskAVG,ImaskAVGCC);
					cvconnectedComponents(ImaskAVGCC);
					//FIND FOREGROUND BY CODEBOOK METHOD
					uchar maskPixelCodeBook;
					pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
					uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
					for(int c=0; c<imageLen; c++)
					{
						 maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
						*pMask++ = maskPixelCodeBook;
						pColor += 3;
					}
					//This part just to visualize bounding boxes and centers if desired
					cvCopy(ImaskCodeBook,ImaskCodeBookCC);
					cvconnectedComponents(ImaskCodeBookCC);
				}

				/* Only Dancer
                    jiwei 2012.3.3*/
				IplImage *ImaDancer = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvZero(ImaDancer);
				cvCopy( rawImage, ImaDancer, ImaskCodeBookCC);
				cvShowImage( "OnlyDancer", ImaDancer);
				//cvWriteToAVI( writer,  ImaDancer);
				/*IplImage *ImaCBvideo = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvConvertImage(ImaskCodeBook, ImaCBvideo, CV_GRAY2RGB);
				cvWriteToAVI( writer,  ImaCBvideo);*/
				IplImage * imgRect = cvCreateImage( cvGetSize( ImaDancer), ImaDancer->depth,
                                              ImaDancer->nChannels);
                CvPoint pntmin, pntmax;
                drawRect( ImaDancer, pntmin, pntmax);
                cvCopy( rawImage, imgRect);
                cvRectangle( imgRect, pntmin, pntmax, cvScalar(0,0,255), 1);
				CvFont font;
                double hScale=0.4;
                double vScale=0.4;
                int    lineWidth=1;
                cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth);
                cvPutText (imgRect,"The Dancer", pntmin, &font, cvScalar(255,255,255));
				cvShowImage( "RectDancer", imgRect);
                cvWriteToAVI( writer,  imgRect);
				/*end of Only Dancer*/
				//Display
           		cvShowImage( "Raw", rawImage );
				cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
   				cvShowImage( "ForegroundAVG",ImaskAVG);
 				cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
 				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);


				//USER INPUT:
	         	c = cvWaitKey(10)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
					//AVG BACKROUND PARAMS
					case '-':
						if(i > endcapture){
							scalehigh += 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '=':
						if(i > endcapture){
							scalehigh -= 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '[':
						if(i > endcapture){
							scalelow += 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
					case ']':
						if(i > endcapture){
							scalelow -= 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
				//CODEBOOK PARAMS
                case 'y':
                case '0':
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'u':
                case '1':
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'v':
                case '2':
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'a': //All
                case '3':
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
				case 'i': //modify max classification bounds (max bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] += 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'o': //modify max classification bounds (max bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] -= 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'k': //modify min classification bounds (min bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] += 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				case 'l': //modify min classification bounds (min bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] -= 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				}

            }
		}
      cvReleaseCapture( &capture );
      cvDestroyWindow( "Raw" );
		cvDestroyWindow( "ForegroundAVG" );
		cvDestroyWindow( "AVG_ConnectComp");
		cvDestroyWindow( "ForegroundCodeBook");
		cvDestroyWindow( "CodeBook_ConnectComp");
        cvDestroyWindow( "RectDancer");
		DeallocateImages();
		if(yuvImage) cvReleaseImage(&yuvImage);
		if(ImaskAVG) cvReleaseImage(&ImaskAVG);
		if(ImaskAVGCC) cvReleaseImage(&ImaskAVGCC);
		if(ImaskCodeBook) cvReleaseImage(&ImaskCodeBook);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cB;
		/*dancer*/
		cvDestroyWindow( "OnlyDancer");
		//if( ImaDancer) cvReleaseImage(&ImaDancer);
    }
	else{ printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
	}
    return 0;
}
Example #5
0
// Kinect touch-surface demo: calibrates the sensor against a surface, then
// tracks fingertips and feeds them to a simple on-screen menu.
// Windows: "fingers" (fullscreen canvas), "surface", "rgb", "depth".
// ESC stops the Kinect loop.
int main (void)
{
  MyFreenectDevice * freenect;
  Freenect::Freenect freeNect;
  IplImage * tmp2 = cvCreateImage(cvSize(640, 480), 8, 3);   // "surface" debug view
  IplImage * tmp = cvCreateImage(cvSize(800, 600), 8, 3);    // "fingers" canvas
  CvFont font;
  Menu * menu = new Menu(5);
  cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5);
  freenect = &freeNect.createDevice<MyFreenectDevice>(0);

  cvNamedWindow("fingers",  CV_WINDOW_NORMAL);
  cvSetWindowProperty("fingers", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
  cvNamedWindow("surface");
  cvResizeWindow("fingers", 800, 600);
  cvMoveWindow("fingers", 0, 1050);
  cvMoveWindow("surface", 0, 0);
  cvSet(tmp, CV_RGB(255, 255, 255));    // FIX: original set this twice; once is enough
  cvShowImage("fingers", tmp);
  cvNamedWindow("rgb");
  cvMoveWindow("rgb", 0 , 480);
  cvNamedWindow("depth");
  cvMoveWindow("depth", 640 , 480);
  freenect->startStream();
  freenect->setTilt(0);
  freenect->setThresh(true, 0, 700);
  int waitTime = 0;   // frames spent showing the manual-calibration notice

  while (freenect->isKinectRunning())
    {
      if (freenect->isDepthReady() && freenect->isRgbReady())
	{
	  cvSet(tmp, CV_RGB(255, 255, 255));
	  cvZero(tmp2);
	  IplImage * img =  (IplImage*)cvClone(freenect->fetchKinectRgbFrame());

	  if (freenect->isCalibrated() == false && !freenect->getSurface())
	    {
	      // no surface yet: let the sensor settle, then auto-calibrate
	      sleep(1);
	      freenect->calibrate();
	    }
	  else if (freenect->isCalibrated() == false)
	    {
	      // surface found but calibration failed: show the notice briefly
	      if (waitTime < 30)
		{
		  cvPutText(tmp, "Initiating manual calibration",
			    cvPoint(250, 200), &font, CV_RGB(255, 0 , 0));
		  cvPutText(tmp, "due to bad lighting conditions", 
			    cvPoint(250, 300), &font, CV_RGB(255, 0 , 0));
		  ++waitTime;
		}
	    }
	  else
	    {
	      // calibrated: draw tracked fingertips and drive the menu
	      cvSet(tmp, CV_RGB(0, 0, 0));
	      freenect->calcFingerPositions();
	      std::list<Finger> fList = freenect->getFingerList();
	      std::list<CvPoint> fListRaw = freenect->getRawFingerList();

	      for (std::list<CvPoint>::iterator it = fListRaw.begin() ; it != fListRaw.end() ; ++it)
	      	{
		  cvCircle(freenect->fetchKinectDepthFrame(), cvPoint(it->x, it->y),
			   10, CV_RGB(0, 255, 0), CV_FILLED);
	      	}
	      std::vector<CvPoint> twoFirst;
	      for (std::list<Finger>::iterator it = fList.begin() ; it != fList.end() ;)
	      	{
		  cvCircle(tmp, cvPoint(it->x, it->y), 10, CV_RGB(255, 0, 0), CV_FILLED);
		  ++it;
		}
	      menu->interpretGesture(fList);
	    }
	  menu->drawMenu(tmp);
	  cvShowImage("fingers", tmp);
	  cvShowImage("surface", tmp2);
	  cvShowImage("rgb", freenect->fetchKinectRgbFrame());
	  cvShowImage("depth", freenect->fetchKinectDepthFrame());
	  cvReleaseImage(&img);
	}
      freenect->update();
      int k = freenect->getKey();
      if (k == 27)
	freenect->setRunning(false);
    }
  freenect->stopStream();
  cvDestroyAllWindows();
  // FIX: release heap resources — the original leaked tmp, tmp2 and menu,
  // and its exit(0) made `return 0` unreachable and skipped static
  // destructors.  Returning normally also lets freeNect tear down cleanly.
  cvReleaseImage(&tmp);
  cvReleaseImage(&tmp2);
  delete menu;
  return 0;
}
// Detect fiducial markers in a single-channel binary image via libfidtrack.
// Each valid fiducial is published as a moDataGenericContainer with
// normalized position plus angle/size properties; when the output has
// observers, id/angle/size labels are also drawn into output_buffer.
void moFiducialTrackerModule::applyFilter(IplImage *src) {
	// FIX: validate inputs before dereferencing them — the original called
	// cvGetSize(src) before assert(src != NULL).
	assert( src != NULL );
	assert( src->imageData != NULL );

	fiducials_data_t *fids = static_cast<fiducials_data_t*>(this->internal);
	assert( fids != NULL );

	moDataGenericContainer *fiducial;
	FiducialX *fdx;
	int fid_count, valid_fiducials = 0;
	bool do_image = this->output->getObserverCount() > 0;	// draw only when observed
	CvSize size = cvGetSize(src);

	CvFont font, font2;
	cvInitFont(&font, CV_FONT_HERSHEY_DUPLEX, 1.0, 1.0, 0, 2);
	cvInitFont(&font2, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0, 0, 1);

	if ( src->nChannels != 1 ) {
		this->setError("FiducialTracker input image must be a single channel binary image.");
		this->stop();
		return;
	}

	// prepare the output image if we have a listener on the output
	if ( do_image )
		cvSet(this->output_buffer, CV_RGB(0, 0, 0));

	// run libfidtrack over the binary image
	step_segmenter(&fids->segmenter, (const unsigned char*)src->imageData);
	fid_count = find_fiducialsX(fids->fiducials, MAX_FIDUCIALS,
			&fids->fidtrackerx, &fids->segmenter, src->width, src->height);

	// prepare to refill fiducials
	this->clearFiducials();

	for ( int i = 0; i < fid_count; i++ ) {
		fdx = &fids->fiducials[i];

		// skip invalid ids (INVALID_FIDUCIAL_ID)
		if ( fdx->id < 0 )
			continue;

		// got a valid fiducial ! process...
		valid_fiducials++;

		LOGM(MO_DEBUG, "fid:" << i << " id=" << fdx->id << " pos=" \
			<< fdx->x << "," << fdx->y << " angle=" << fdx->angle);

		// publish the fiducial with its position normalized to [0, 1]
		fiducial = new moDataGenericContainer();
		fiducial->properties["implements"] = new moProperty("fiducial,pos,tracked");
		fiducial->properties["fiducial_id"] = new moProperty(fdx->id);
		fiducial->properties["blob_id"] = new moProperty(fdx->id);
		fiducial->properties["x"] = new moProperty(fdx->x / size.width);
		fiducial->properties["y"] = new moProperty(fdx->y / size.height);
		fiducial->properties["angle"] = new moProperty(fdx->angle);
		fiducial->properties["leaf_size"] = new moProperty(fdx->leaf_size);
		fiducial->properties["root_size"] = new moProperty(fdx->root_size);
		this->fiducials.push_back(fiducial);

		// draw id, angle (degrees) and leaf/root size on the output image
		if ( do_image ) {
			std::ostringstream oss;
			oss << fdx->id;
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x, fdx->y - 20), &font, cvScalar(20, 255, 20));

			oss.str("");
			oss << "angle:" << int(fdx->angle * 180 / 3.14159265);
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x - 30, fdx->y), &font2, cvScalar(20, 255, 20));

			oss.str("");
			oss << "l/r:" << fdx->leaf_size << "/" << fdx->root_size;
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x - 50, fdx->y + 20), &font2, cvScalar(20, 255, 20));
		}
	}

	LOGM(MO_DEBUG, "-> Found " << valid_fiducials << " fiducials");
	this->output_data->push(&this->fiducials);
}
Example #7
0
// Parse a delimiter-separated values file into this object's matrices.
//
// The first line determines the column count; every row must then contain
// exactly that many values.  Fills `values` (float matrix), `missing`
// (mask of MISS_VAL entries, released again if nothing is missing),
// `var_types` and `var_idx_mask`.  Returns 0 on success, -1 on any
// open/format error.
int CvMLData::read_csv(const char* filename)
{
    const int M = 1000000;                  // maximum supported line length
    const char str_delimiter[3] = { ' ', delimiter, '\0' };
    FILE* file = 0;
    CvMemStorage* storage;
    CvSeq* seq;
    char *ptr;
    float* el_ptr;
    CvSeqReader reader;
    int cols_count = 0;
    uchar *var_types_ptr = 0;

    clear();

    file = fopen( filename, "rt" );

    if( !file )
        return -1;

    // read the first line and determine the number of variables
    std::vector<char> _buf(M);
    char* buf = &_buf[0];
    if( !fgets_chomp( buf, M, file ))
    {
        fclose(file);
        return -1;
    }

    // count delimiters (a run of spaces counts once) to get the column count
    ptr = buf;
    while( *ptr == ' ' )
        ptr++;
    for( ; *ptr != '\0'; )
    {
        if(*ptr == delimiter || *ptr == ' ')
        {
            cols_count++;
            ptr++;
            while( *ptr == ' ' ) ptr++;
        }
        else
            ptr++;
    }

    if ( cols_count == 0)
    {
        fclose(file);
        return -1;
    }
    cols_count++;   // N delimiters separate N+1 columns

    // create temporary memory storage to store the whole database
    el_ptr = new float[cols_count];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), cols_count*sizeof(float), storage );

    var_types = cvCreateMat( 1, cols_count, CV_8U );
    cvZero( var_types );
    var_types_ptr = var_types->data.ptr;

    // tokenize each line into el_ptr and push the row onto the sequence
    for(;;)
    {
        char *token = NULL;
        int type;
        token = strtok(buf, str_delimiter);
        if (!token)
            break;
        for (int i = 0; i < cols_count-1; i++)
        {
            str_to_flt_elem( token, el_ptr[i], type);
            var_types_ptr[i] |= type;
            token = strtok(NULL, str_delimiter);
            if (!token)
            {
                // short row: bail out
                fclose(file);
                // FIX: the original leaked el_ptr and the memory storage
                // on this early-exit path.
                delete [] el_ptr;
                cvReleaseMemStorage( &storage );
                return -1;
            }
        }
        str_to_flt_elem( token, el_ptr[cols_count-1], type);
        var_types_ptr[cols_count-1] |= type;
        cvSeqPush( seq, el_ptr );
        if( !fgets_chomp( buf, M, file ) )
            break;
    }
    fclose(file);

    // copy the collected rows into the output matrices
    values = cvCreateMat( seq->total, cols_count, CV_32FC1 );
    missing = cvCreateMat( seq->total, cols_count, CV_8U );
    var_idx_mask = cvCreateMat( 1, values->cols, CV_8UC1 );
    cvSet( var_idx_mask, cvRealScalar(1) );
    train_sample_count = seq->total;

    cvStartReadSeq( seq, &reader );
    for(int i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr;
        float* ddata = values->data.fl + cols_count*i;
        uchar* dm = missing->data.ptr + cols_count*i;

        for( int j = 0; j < cols_count; j++ )
        {
            ddata[j] = sdata[j];
            dm[j] = ( fabs( MISS_VAL - sdata[j] ) <= FLT_EPSILON );
        }
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }

    // if nothing was actually missing, drop the mask entirely
    if ( cvNorm( missing, 0, CV_L1 ) <= FLT_EPSILON )
        cvReleaseMat( &missing );

    cvReleaseMemStorage( &storage );
    delete []el_ptr;
    return 0;
}
Example #8
0
// Train (or load) a 26-class boosted-tree classifier.  Because CvBoost only
// handles 2-class problems, each training sample is "unrolled" into one
// binary (is this class j?) sample per class.  Prints train/test
// recognition rates and optionally saves the model.
static
int build_boost_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    const int class_count = 26;     // letters 'A'..'Z'
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* temp_sample = 0;
    CvMat* weak_responses = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    int i, j, k;
    double train_hr = 0, test_hr = 0;
    CvBoost boost;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        // cvReleaseMat is a no-op on null, so this is safe even if
        // read_num_class_data allocated nothing.
        cvReleaseMat( &data );
        cvReleaseMat( &responses );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.5);   // first half trains, second half tests
    var_count = data->cols;

    // Create or load Boosted Tree classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        boost.load( filename_to_load );
        ntrain_samples = 0;
        if( !boost.get_weak_predictors() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            // FIX: the database matrices were leaked on this path.
            cvReleaseMat( &data );
            cvReleaseMat( &responses );
            return -1;
        }
        // FIX: report the classifier file, not the database file.
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
        // As the boosted tree classifier in MLL can only be trained for
        // 2-class problems, we transform the training database by
        // "unrolling" each training sample as many times as the number of
        // classes (26) that we have.
        CvMat* new_data = cvCreateMat( ntrain_samples*class_count, var_count + 1, CV_32F );
        CvMat* new_responses = cvCreateMat( ntrain_samples*class_count, 1, CV_32S );

        // 1. unroll the database type mask
        printf( "Unrolling the database...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            float* data_row = (float*)(data->data.ptr + data->step*i);
            for( j = 0; j < class_count; j++ )
            {
                float* new_data_row = (float*)(new_data->data.ptr +
                                new_data->step*(i*class_count+j));
                for( k = 0; k < var_count; k++ )
                    new_data_row[k] = data_row[k];
                new_data_row[var_count] = (float)j;     // appended class indicator
                // binary response: 1 when the appended class matches the true label
                new_responses->data.i[i*class_count + j] = responses->data.fl[i] == j+'A';
            }
        }

        // 2. create type mask: all features ordered; the class-indicator
        // variable and the binary response are categorical
        var_type = cvCreateMat( var_count + 2, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        cvSetReal1D( var_type, var_count, CV_VAR_CATEGORICAL );
        cvSetReal1D( var_type, var_count+1, CV_VAR_CATEGORICAL );

        // 3. train classifier
        printf( "Training the classifier (may take a few minutes)...");
        boost.train( new_data, CV_ROW_SAMPLE, new_responses, 0, 0, var_type, 0,
            CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 ));
        cvReleaseMat( &new_data );
        cvReleaseMat( &new_responses );
        printf("\n");
    }

    temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );

    // compute prediction error on train and test data: the predicted class
    // is the one whose unrolled sample gets the highest weak-response sum
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );
        for( k = 0; k < var_count; k++ )
            temp_sample->data.fl[k] = sample.data.fl[k];

        for( j = 0; j < class_count; j++ )
        {
            temp_sample->data.fl[var_count] = (float)j;
            boost.predict( temp_sample, 0, weak_responses );
            double sum = cvSum( weak_responses ).val[0];
            if( max_sum < sum )
            {
                max_sum = sum;
                best_class = j + 'A';
            }
        }

        r = fabs(best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", boost.get_weak_predictors()->total );

    // Save classifier to file if needed
    if( filename_to_save )
        boost.save( filename_to_save );

    cvReleaseMat( &temp_sample );
    cvReleaseMat( &weak_responses );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
Example #9
0
static
int build_rtrees_classifier( char* data_filename,
    char* filename_to_save, char* filename_to_load )
{
    /* Builds (or loads) a Random Trees classifier for a numeric
       classification dataset (e.g. the UCI letter dataset).

       data_filename    - CSV database read via read_num_class_data()
       filename_to_save - if non-NULL, the trained forest is saved here
       filename_to_load - if non-NULL, a pre-trained forest is loaded from
                          here instead of training (all samples become test)

       Returns 0 on success, -1 on failure. */
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* sample_idx = 0;

    // Each row of <data> is one feature vector; <responses> holds the labels.
    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int i = 0;
    double train_hr = 0, test_hr = 0;
    CvRTrees forest;
    CvMat* var_importance = 0;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.8);   // 80/20 train/test split

    // Create or load Random Trees classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        forest.load( filename_to_load );
        ntrain_samples = 0;     // everything is evaluated as test data
        if( forest.get_tree_count() == 0 )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            // Fix: release the database matrices before the early return
            // (the original leaked them here).
            cvReleaseMat( &data );
            cvReleaseMat( &responses );
            return -1;
        }
        // Fix: report the classifier file that was loaded, not the database
        // file (the original printed data_filename here).
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
        // create classifier by using <data> and <responses>
        printf( "Training the classifier ...");

        // 1. create type mask: all features are ordered, the response
        //    (last entry) is categorical.
        var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        cvSetReal1D( var_type, data->cols, CV_VAR_CATEGORICAL );

        // 2. create sample_idx: mark the first ntrain_samples rows as the
        //    training subset, the rest as held-out test data.
        sample_idx = cvCreateMat( 1, nsamples_all, CV_8UC1 );
        {
            CvMat mat;
            cvGetCols( sample_idx, &mat, 0, ntrain_samples );
            cvSet( &mat, cvRealScalar(1) );

            cvGetCols( sample_idx, &mat, ntrain_samples, nsamples_all );
            cvSetZero( &mat );
        }

        // 3. train classifier
        forest.train( data, CV_ROW_SAMPLE, responses, 0, sample_idx, var_type, 0,
            CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER));
        printf( "\n");
    }

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );

        r = forest.predict( &sample );
        r = fabs((double)r - responses->data.fl[i]) <= FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    // Fix: guard both divisions. ntrain_samples == 0 whenever a pre-trained
    // classifier was loaded (the original divided by zero there), and very
    // small databases can leave either partition empty.
    if( nsamples_all > ntrain_samples )
        test_hr /= (double)(nsamples_all-ntrain_samples);
    if( ntrain_samples > 0 )
        train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", forest.get_tree_count() );

    // Print variable importance
    var_importance = (CvMat*)forest.get_var_importance();
    if( var_importance )
    {
        double rt_imp_sum = cvSum( var_importance ).val[0];
        printf("var#\timportance (in %%):\n");
        for( i = 0; i < var_importance->cols; i++ )
            printf( "%-2d\t%-4.1f\n", i,
            100.f*var_importance->data.fl[i]/rt_imp_sum);
    }

    // Print some proximities.
    // Fix: the hard-coded row indices (103, 106) are only valid when the
    // database actually has that many samples; skip the demo otherwise
    // instead of reading out of range.
    if( nsamples_all > 106 )
    {
        printf( "Proximities between some samples corresponding to the letter 'T':\n" );
        CvMat sample1, sample2;
        const int pairs[][2] = {{0,103}, {0,106}, {106,103}, {-1,-1}};

        for( i = 0; pairs[i][0] >= 0; i++ )
        {
            cvGetRow( data, &sample1, pairs[i][0] );
            cvGetRow( data, &sample2, pairs[i][1] );
            printf( "proximity(%d,%d) = %.1f%%\n", pairs[i][0], pairs[i][1],
                forest.get_proximity( &sample1, &sample2 )*100. );
        }
    }

    // Save Random Trees classifier to file if needed
    if( filename_to_save )
        forest.save( filename_to_save );

    cvReleaseMat( &sample_idx );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
示例#10
0
文件: main.cpp 项目: kckemper/kemper
// Evolutionary neural-network vision experiment: each frame is split into
// N_SLICES vertical strips; one Evo learner per strip is trained to
// reproduce a desired output selected by a command read from
// commandlist.dat. Results are logged per trial to ./outputs/output_<n>.txt.
// NOTE(review): depends on project macros/types not visible in this file
// (SIZE, N_SLICES, N_TRIALS, N_NETS, N_HIDDEN, N_LEARN, N_EPISODES,
// N_STEPS, NF, USECAM, PIX, allocateOnDemand, Evo) — confirm their
// definitions before relying on the behavior described here.
int main( int argc, char** argv ) {
	
	int		cmd;
	// Desired per-slice outputs, indexed by the command value read from file.
	float	desired[8][8]	= { {1,0,0,0,0,0,0,0},	// 0
								{0,1,0,0,0,0,0,0},	// 1
								{0,0,1,0,0,0,0,0},	// 2
								{0,0,0,1,1,0,0,0},	// 3 forward
								{0,0,0,1,1,0,0,0},	// 4 forward
								{0,0,0,0,0,1,0,0},	// 5
								{0,0,0,0,0,0,1,0},	// 6
								{0,0,0,0,0,0,0,1}};	// 7
						 
	CvPoint		p,q;
	CvScalar	line_color		= CV_RGB(0,0,255);
	CvScalar	out_color;

	const char* name_orig		= "Original: press q to save images";
	const char* name_ave		= "input";
	const char* name_weights	= "weights";
	
	const char*	inputCmdFile_name	= "./inputs/dataset1/commandlist.dat";
	const char*	outputFile_name		= "./outputs/output_";
	FILE*		outputFile;
	FILE*		inputCmdFile;
	char		inputName[64];
	char		outputName[64];

	
	CvCapture* capture = cvCreateCameraCapture(0) ;
		
	CvSize frame_size;
	CvScalar ave = cvScalar(1);
	
	
	CvRect  slice_rect;
	CvSize	slice_size;

	// Work images; allocateOnDemand presumably allocates on first use —
	// TODO confirm against its definition elsewhere in the project.
	static	IplImage*	frame				= NULL;
	static	IplImage*	frame_g				= NULL;
	static	IplImage*	frame_small			= NULL;
	static	IplImage*	frame_weights		= NULL;
	static	IplImage*	frame_w_big			= NULL;
	static	IplImage*	frame_w_final		= NULL;
	static	IplImage*	frame_final			= NULL;	
	
	static	IplImage*	ave_image			= NULL;
//	static	IplImage *scale					= NULL;
	
	static	IplImage*	frame_slices[N_SLICES];



	float	inputs[(SIZE/N_SLICES)*SIZE];
	float	outputs[N_SLICES];
	int		choices[N_SLICES];
//	float	desired[N_SLICES];
//	float	desired[] = {0,0,0,1,1,0,0,0};										//XXX dummy test...	

	//Evo (int nNets, int nInputs, int nHidden, int nOuts)
	Evo*	evoSlice;

	int		ep;
	int		trial;
	int		stepCnt;

	int		flag = 0;

	char	c;
	int		i,j,k,s;
	float	tmp;

////////////////////////////////////////////////////////////////////////////////
// init stuff

	
	inputCmdFile	= fopen(inputCmdFile_name,"r");
	if (inputCmdFile == NULL) {printf("Unable to open: %s",inputCmdFile_name); return 0; }
	
	// create windows for looking at stuff
//	cvNamedWindow( name_slice,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_weights,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_ave,		CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_orig,		CV_WINDOW_AUTOSIZE );
	

//	frame_size	= cvSize(frame->width,frame->height);
	frame_size	= cvSize(SIZE,SIZE);

#ifdef USECAM
	// capture a frame so we can get an idea of the size of the source
	frame = cvQueryFrame( capture );
	if( !frame ) return 0;
#else
	sprintf(inputName,"./inputs/dataset1/image0000000000.jpg");
	frame = cvLoadImage(inputName, 0 );
	if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
#endif


	allocateOnDemand( &frame_g,			cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_w_big,		cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );	
	allocateOnDemand( &frame_w_final,	cvSize(frame->width,frame->height), IPL_DEPTH_8U, 3 );	
	allocateOnDemand( &frame_final,		cvSize(frame->width,frame->height+20), IPL_DEPTH_8U, 3 );	
	
	
	allocateOnDemand( &ave_image,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_small,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_weights,	frame_size, IPL_DEPTH_8U, 1 );


	slice_size = cvSize(ave_image->width/N_SLICES, ave_image->height);


	for (i=0;i<N_SLICES;i++) {
		allocateOnDemand( &frame_slices[i], slice_size, IPL_DEPTH_8U, 1);
	}


	for(trial=0;trial<N_TRIALS;trial++) {


		sprintf(outputName,"%s%d.txt", outputFile_name, trial);
		outputFile		= fopen(outputName,"w");


		// init each leariner
		// NOTE(review): Evo objects are malloc'd and then assigned to
		// without ever being constructed — undefined behavior in C++ unless
		// Evo is trivially assignable. new[]/delete[] (or placement new)
		// would be required; confirm against Evo's definition.
		evoSlice = (Evo*)malloc(sizeof(Evo)*N_SLICES);
		for(i=0;i<N_SLICES;i++) {
			evoSlice[i] = Evo(N_NETS, (SIZE/N_SLICES)*SIZE, N_HIDDEN, 1);
			evoSlice[i].choose();
			choices[i] = evoSlice[i].choose();
		}

		ep		= 0;
		stepCnt = 0;
		flag	= 0;


		while(1) {

	////////////////////////////////////////////////////////////////////////////////
	// Pre processing		

#if 0
			// make blank image...
			cvSet(ave_image, cvScalar(0));

	
			for (i=0;i<NF;i++) {
	
				// get image
#ifdef USECAM
				frame = cvQueryFrame( capture );
				if( !frame ) break;
#else				
				sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
				frame = cvLoadImage(inputName, 0 );
				if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
				stepCnt++;
#endif			
				// convert it to grey
				cvConvertImage(frame, frame_g );//, CV_CVTIMG_FLIP);


				// resize
				cvResize(frame_g, frame_small);

				
				// take difference
				cvSub(frame_small, ave_image, ave_image);

			}

			for(j=0;j<SIZE;j++) {
				for(k=0;k<SIZE;k++) {
					PIX(ave_image,k,j) = (char)(PIX(ave_image,k,j)*10);
				}
			}

#endif

#if 0
			frame = cvQueryFrame( capture );
			if( !frame ) break;
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
#endif
		
#if 1
			// NOTE(review): images returned by cvLoadImage are never
			// released — this leaks one frame per loop iteration.
			sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
			frame = cvLoadImage(inputName, 0 );
			if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); break;}
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
			
//			cvCanny(ave_image, ave_image, 50, 40,5);
			
#endif
		
		
	//		cvDilate(ave_image, ave_image,NULL,4);


	////////////////////////////////////////////////////////////////////////////////
	// Generate NN inputs


			// slice it up
			for (i=0;i<N_SLICES;i++) {
	
				slice_rect = cvRect(i*ave_image->width/N_SLICES, 0, ave_image->width/N_SLICES, ave_image->height);

				cvSetImageROI(ave_image, slice_rect);
				
				cvCopy(ave_image, frame_slices[i], NULL);

			}

			cvResetImageROI(ave_image);  // remove this when we don't care about looking at the ave

	////////////////////////////////////////////////////////////////////////////////
	// Evaluate NN
			if (stepCnt == N_LEARN)
				flag = 1;
	
			if( (flag == 1) && (stepCnt%N_LEARN == 0)) {	// every N_LEARN images switch
		
				ep++;
				fprintf(outputFile,"%d",ep);

				// Replace the worst learner and log the best grade per slice.
				for(i=0;i<N_SLICES;i++) {
					evoSlice[i].replace();
					choices[i] = evoSlice[i].choose();
				
					fprintf(outputFile,"\t%1.3f",evoSlice[i].netPool[evoSlice[i].best()].grade);
				
				}
			
				fprintf(outputFile,"\n");
			
				if(ep >= N_EPISODES) break;
			
				// draw weights image
				for(s=0;s<N_SLICES;s++) {
		
					for(j=0;j<SIZE;j++) {
						for(k=0;k<(SIZE/N_SLICES);k++) {
				
							tmp = 0;
							for(i=0;i<N_HIDDEN;i++) {
								tmp += evoSlice[s].mutant->nodeHidden->w[(j*(SIZE/N_SLICES))+k+1];
							}
					
							PIX(frame_weights,k+(s*SIZE/N_SLICES),j) = (char)((tmp/N_HIDDEN)*255 + 127);
		//					printf("%d\t",(char)((tmp/N_HIDDEN)*255));
						}
					}
				}

				cvResize(frame_weights, frame_w_big, CV_INTER_LINEAR);
				cvConvertImage(frame_w_big, frame_w_final);
			
			}


			// Read the next command and echo the desired per-slice outputs.
			fscanf(inputCmdFile,"%d",&cmd);

			printf("\nTrial: %d   Episode: %d   Devin's cmd: %d\n",trial,ep,cmd);
			for(i=0;i<N_SLICES;i++)
				printf("%1.3f\t",desired[cmd][i]);
			printf("\n");
		

			for(i=0;i<N_SLICES;i++) {
	//			cvShowImage( name_slice, frame_slices[i] );

				// strip pixel data into a single array
				for(j=0;j<SIZE;j++) {
					for(k=0;k<(SIZE/N_SLICES);k++) {
						inputs[(j*(SIZE/N_SLICES))+k]	= (float)PIX(frame_slices[i],k,j)/255.0;
					}
				}


	//			printf("\n%d: Eval slice %d\n",stepCnt,i);
				outputs[i] = evoSlice[i].eval(inputs, &desired[cmd][i]);
	//			outputs[i] = desired[i];
				printf("%1.3f\t",outputs[i]);

			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%d\t",choices[i]);
			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%1.3f\t",evoSlice[i].mutant->grade);
			}
			printf("\n");


		
	////////////////////////////////////////////////////////////////////////////////
	// GUI stuff

		
		
		
			// copy input image into larger final image
			cvSetImageROI(frame_final, cvRect(0, 0, frame_w_big->width, frame_w_big->height));
			cvConvertImage(frame, frame_final);
			cvResetImageROI(frame_final);

			// draw slice markers
			for(i=1;i<N_SLICES;i++) {
				// on the final frame...
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_final->height;
				cvLine( frame_final, p, q, line_color, 2, CV_AA, 0 );

				// on the weights
				p.x = (int)(i*frame_w_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_w_final->height;
				cvLine( frame_w_final, p, q, line_color, 2, CV_AA, 0 );
			}

			// draw output indicators
			for(i=0;i<N_SLICES;i++) {
				out_color = CV_RGB(outputs[i]*255,0,0);
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = (int)(frame_final->height-20);
				q.x = (int)(p.x+frame_final->width/N_SLICES);
				q.y = (int)(p.y+20);
				cvRectangle( frame_final, p, q, out_color, CV_FILLED, CV_AA, 0 );
			}
		
		
			cvShowImage( name_ave,		ave_image );
			cvShowImage( name_orig,		frame_final );
			cvShowImage( name_weights,	frame_w_final );
		
			c = cvWaitKey(2);
		
			if( c == 27 ) break;
			else if( c == 'q') {
				cvSaveImage("weights.jpg",frame_w_final);
				cvSaveImage("output.jpg",frame_final);
			}

			stepCnt++;
			if (stepCnt>=(N_STEPS-(N_STEPS%N_LEARN))) {
				stepCnt=0;
				rewind(inputCmdFile);
			}


		} // end while

		// NOTE(review): free() does not run destructors; see the malloc
		// note above.
		free(evoSlice);
		fclose(outputFile);
	} // end trial for

////////////////////////////////////////////////////////////////////////////////
// clean up
//	delete &evo;

	fclose(inputCmdFile);
	
	cvReleaseCapture(	&capture );
	cvDestroyWindow(	name_ave );
	cvDestroyWindow(	name_orig );
	cvDestroyWindow(	name_weights );
	// No explicit return: main() implicitly returns 0 in C++.
}
示例#11
0
// Camera demo: grabs frames, maintains a grayscale background model, and
// extracts bright moving regions. Every pixel whose HSV value channel
// exceeds 80 gets a circle drawn on a persistent canvas, colored by which
// BGR channels of the source pixel exceed 30; a sound is played every 30
// bright pixels. Keys: 'q' quit, 'b' re-capture background, 'c' clear canvas.
// Depends on project macros WIDTH, HEIGHT, THRESH_BOTTOM,
// THRESHOLD_MAX_VALUE, CIRCLE_RADIUS, LINE_THICKNESS, LINE_TYPE.
int main( int argc, char **argv ){ 
	int key;							// keyboard input
	CvCapture *capture = NULL;			// camera capture handle
	IplImage *frameImage;				// current captured frame
	IplImage *frameImage2;				// persistent drawing canvas

	// Working images (all WIDTH x HEIGHT, single channel unless noted)
	IplImage *backgroundImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// grayscale background model
	IplImage *grayImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// grayscale of current frame
	IplImage *differenceImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// |frame - background|

	IplImage *hsvImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );				// HSV conversion of the frame
	IplImage *hueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// H channel
	IplImage *saturationImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// S channel
	IplImage *valueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// V channel
	IplImage *thresholdImage1 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// mask: V > THRESH_BOTTOM
	IplImage *lightImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// bright moving regions

	char windowNameCapture[] = "Capture"; 			// raw capture window
	char windowNameLight[] = "Light";				// bright-region mask window
	char windowNameCapture2[] = "Capture2"; 		// drawing canvas window
	char windowNameThreshold[] = "Threshold";		// threshold mask window

	int gravityX;						// pixel x (name kept from original)
	int gravityY;						// pixel y
	unsigned char v,r,g,b;				// V channel and BGR of current pixel
	// Fix: initialize the draw color. Previously rr/gg/bb were read
	// uninitialized when no channel exceeded 30 (undefined behavior).
	int rr = 0, gg = 0, bb = 0;
	int soundc = 0;						// bright-pixel counter for sound


	// Initialize the camera
	if ( ( capture = cvCreateCameraCapture( 0 ) ) == NULL ) {
		// No camera found
		printf( "カメラが見つかりません\n" );
		return -1;
	}

	// Create the display windows
	cvNamedWindow( windowNameCapture, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameLight, CV_WINDOW_AUTOSIZE );
  	cvNamedWindow( windowNameCapture2, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameThreshold, CV_WINDOW_AUTOSIZE );

  	// Capture one frame and use its grayscale as the initial background
	frameImage = cvQueryFrame( capture );
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	frameImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );
	cvSet (frameImage2, CV_RGB(0,0,0));  //黒色で塗りつぶす

	// Main loop
	while( 1 ) {
		// Grab the next frame
		frameImage = cvQueryFrame( capture );
		// Grayscale it and subtract the background model
		cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
		cvAbsDiff( grayImage, backgroundImage, differenceImage );
		
		// Convert BGR -> HSV and split the channels
		cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
		cvSplit( hsvImage, hueImage, saturationImage, valueImage, NULL );
		// Mask of bright pixels (V above THRESH_BOTTOM)
		cvThreshold( valueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
		
		// Bright AND moving
		cvAnd( differenceImage, thresholdImage1, lightImage, NULL );

		// Fix: iterate over the actual image extent (HEIGHT x WIDTH) rather
		// than the hard-coded 480x720 of the original, which could read
		// past the end of the image data.
		for(gravityY=0;gravityY<HEIGHT;gravityY++){
			for(gravityX=0;gravityX<WIDTH;gravityX++){
				v = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 2];    // V

				b = frameImage ->imageData[frameImage ->widthStep * (gravityY) + gravityX * 3 ];     // B
				g = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 1];    // G
				r = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 2];    // R

				// Classify the pixel color; later tests deliberately
				// override earlier ones (e.g. B+G wins over G alone).
				if (r>30){
					rr=250;
					gg=0;
					bb=0;
				}
				if (g>30){
					rr=0;
					gg=250;
					bb=0;
				}
				if (b>30){
					rr=0;
					gg=0;
					bb=250;
				}
				if ((b>30) && (r>30)){
					rr=250;
					gg=0;
					bb=250;
				}
				if ((b>30) && (g>30)){
					rr=0;
					gg=250;
					bb=250;
				}
				if ((b>30) && (g>30) && (r>30)){
					rr=250;
					gg=250;
					bb=250;
				}

				// Draw a circle on the canvas for every bright pixel
				if (v>80){
					cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS, CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 );
					soundc++;
					if(soundc %30==0){
						// Play a notification sound (Windows API, async)
						PlaySound("notify.wav" , NULL , SND_FILENAME | SND_ASYNC);
						soundc=0;
					}
				}
			}
		}
	
		// Show the results
		cvShowImage( windowNameCapture, frameImage );
		cvShowImage( windowNameLight, lightImage );
		cvShowImage( windowNameCapture2,   frameImage2);
		cvShowImage( windowNameThreshold, thresholdImage1);

		// Key handling
		key = cvWaitKey( 10 );
		if( key == 'q' ) 
			// quit
			break;
		else if( key == 'b' ) {
			// re-capture the background model from the current scene
			frameImage = cvQueryFrame( capture );
		    cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
		}
		else if(key == 'c') {
			// clear the drawing canvas
			cvSet (frameImage2, CV_RGB(0,0,0));  //黒色で塗りつぶす
		}
	}
	// Release capture and all images, destroy windows
	cvReleaseCapture( &capture );
	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
	cvReleaseImage( &lightImage );
	cvDestroyWindow( windowNameCapture );
	cvDestroyWindow( windowNameLight );
	cvDestroyWindow( windowNameThreshold );
	cvDestroyWindow( windowNameCapture2 );

	return 0;
} 
CharacterOpacity *CharacterOpacityCalculator::newCharacterOpacityForCharacter(char character) const
{
	// Render the character onto a white canvas sized lettersSize, then
	// compute the mean pixel intensity of each quadrant (NW/NE/SW/SE),
	// normalized to [0,1]. The caller owns the returned object.
	IplImage *glyph = cvCreateImage(cvSize(this->lettersSize.width, this->lettersSize.height), 8, 1);
	cvSet(glyph, cvScalarAll(255.0));
	
	char text[2];
	sprintf(text, "%c", character);
	
	CvFont font;
	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);
	cvPutText(glyph, text, cvPoint(0, this->lettersSize.height-1), &font, cvScalar(0));
	
	// Quadrant accumulators, indexed NW=0, NE=1, SW=2, SE=3.
	long intensitySum[4] = {0, 0, 0, 0};
	int pixelCount[4] = {0, 0, 0, 0};
	
	const int halfHeight = glyph->height / 2;
	const int halfWidth = glyph->width / 2;
	
	// Single pass over the image; each pixel is routed to its quadrant.
	for (int row = 0; row < glyph->height; ++row)
	{
		const uchar *line = (const uchar *)(glyph->imageData + row*glyph->widthStep);
		for (int col = 0; col < glyph->width; ++col)
		{
			const int quadrant = (row < halfHeight ? 0 : 2) + (col < halfWidth ? 0 : 1);
			intensitySum[quadrant] += line[col];
			++pixelCount[quadrant];
		}
	}
	
	cvReleaseImage(&glyph);
	
	const float opacityNW = (((float)intensitySum[0] / pixelCount[0])/255.0f);
	const float opacityNE = (((float)intensitySum[1] / pixelCount[1])/255.0f);
	const float opacitySW = (((float)intensitySum[2] / pixelCount[2])/255.0f);
	const float opacitySE = (((float)intensitySum[3] / pixelCount[3])/255.0f);
	
	return new CharacterOpacity(character, opacityNW, opacityNE, opacitySW, opacitySE);
}
示例#13
0
// Graph-cut based stereo correspondence (OpenCV legacy C API).
// Computes left/right disparity maps for a rectified 8-bit grayscale stereo
// pair by iteratively lowering an energy with alpha-expansion moves
// (icvAlphaExpand) over a shuffled set of candidate disparities.
// _dispLeft/_dispRight receive the results; when useDisparityGuess is
// nonzero they also provide the initial disparity estimate.
CV_IMPL void cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _right,
    CvArr* _dispLeft, CvArr* _dispRight, CvStereoGCState* state, int useDisparityGuess )
{
    CvStereoGCState2 state2;
    state2.orphans = 0;
    state2.maxOrphans = 0;

    CV_FUNCNAME( "cvFindStereoCorrespondenceGC" );

    __BEGIN__;

    CvMat lstub, *left = cvGetMat( _left, &lstub );
    CvMat rstub, *right = cvGetMat( _right, &rstub );
    CvMat dlstub, *dispLeft = cvGetMat( _dispLeft, &dlstub );
    CvMat drstub, *dispRight = cvGetMat( _dispRight, &drstub );
    CvSize size;
    int iter, i, nZeroExpansions = 0;
    CvRNG rng = cvRNG(-1);
    int* disp;
    CvMat _disp;
    int64 E;    // current total energy being minimized

    // Inputs must be same-size single-channel 8-bit images; disparity
    // outputs (when given) must match the input size.
    CV_ASSERT( state != 0 );
    CV_ASSERT( CV_ARE_SIZES_EQ(left, right) && CV_ARE_TYPES_EQ(left, right) &&
               CV_MAT_TYPE(left->type) == CV_8UC1 );
    CV_ASSERT( !dispLeft ||
        (CV_ARE_SIZES_EQ(dispLeft, left) && CV_MAT_CN(dispLeft->type) == 1) );
    CV_ASSERT( !dispRight ||
        (CV_ARE_SIZES_EQ(dispRight, left) && CV_MAT_CN(dispRight->type) == 1) );

    // (Re)allocate the per-state working buffers when the image size changed.
    size = cvGetSize(left);
    if( !state->left || state->left->width != size.width || state->left->height != size.height )
    {
        // Element counts expressed in ints so the graph structures can live
        // in CV_32SC(n) matrices.
        int pcn = (int)(sizeof(GCVtx*)/sizeof(int));
        int vcn = (int)(sizeof(GCVtx)/sizeof(int));
        int ecn = (int)(sizeof(GCEdge)/sizeof(int));
        cvReleaseMat( &state->left );
        cvReleaseMat( &state->right );
        cvReleaseMat( &state->ptrLeft );
        cvReleaseMat( &state->ptrRight );
        cvReleaseMat( &state->dispLeft );
        cvReleaseMat( &state->dispRight );

        state->left = cvCreateMat( size.height, size.width, CV_8UC3 );
        state->right = cvCreateMat( size.height, size.width, CV_8UC3 );
        state->dispLeft = cvCreateMat( size.height, size.width, CV_16SC1 );
        state->dispRight = cvCreateMat( size.height, size.width, CV_16SC1 );
        state->ptrLeft = cvCreateMat( size.height, size.width, CV_32SC(pcn) );
        state->ptrRight = cvCreateMat( size.height, size.width, CV_32SC(pcn) );
        state->vtxBuf = cvCreateMat( 1, size.height*size.width*2, CV_32SC(vcn) );
        state->edgeBuf = cvCreateMat( 1, size.height*size.width*12 + 16, CV_32SC(ecn) );
    }

    // Initialize working disparities: either everything OCCLUDED, or the
    // caller-provided guess.
    if( !useDisparityGuess )
    {
        cvSet( state->dispLeft, cvScalarAll(OCCLUDED));
        cvSet( state->dispRight, cvScalarAll(OCCLUDED));
    }
    else
    {
        CV_ASSERT( dispLeft && dispRight );
        cvConvert( dispLeft, state->dispLeft );
        cvConvert( dispRight, state->dispRight );
    }

    // Copy tuning parameters, scaling the float weights to fixed point.
    state2.Ithreshold = state->Ithreshold;
    state2.interactionRadius = state->interactionRadius;
    state2.lambda = cvRound(state->lambda*DENOMINATOR);
    state2.lambda1 = cvRound(state->lambda1*DENOMINATOR);
    state2.lambda2 = cvRound(state->lambda2*DENOMINATOR);
    state2.K = cvRound(state->K*DENOMINATOR);

    icvInitStereoConstTabs();
    icvInitGraySubpix( left, right, state->left, state->right );
    // Candidate disparities [minDisparity, minDisparity+numberOfDisparities),
    // visited in random order each pass.
    disp = (int*)cvStackAlloc( state->numberOfDisparities*sizeof(disp[0]) );
    _disp = cvMat( 1, state->numberOfDisparities, CV_32S, disp );
    cvRange( &_disp, state->minDisparity, state->minDisparity + state->numberOfDisparities );
    cvRandShuffle( &_disp, &rng );

    // Negative parameters mean "derive automatically" from the data term.
    if( state2.lambda < 0 && (state2.K < 0 || state2.lambda1 < 0 || state2.lambda2 < 0) )
    {
        float L = icvComputeK(state)*0.2f;
        state2.lambda = cvRound(L*DENOMINATOR);
    }

    if( state2.K < 0 )
        state2.K = state2.lambda*5;
    if( state2.lambda1 < 0 )
        state2.lambda1 = state2.lambda*3;
    if( state2.lambda2 < 0 )
        state2.lambda2 = state2.lambda;

    icvInitStereoTabs( &state2 );

    // Alpha-expansion: accept a move only if it lowers the energy; stop a
    // pass early after a full cycle of non-improving expansions.
    E = icvComputeEnergy( state, &state2, !useDisparityGuess );
    for( iter = 0; iter < state->maxIters; iter++ )
    {
        for( i = 0; i < state->numberOfDisparities; i++ )
        {
            int alpha = disp[i];
            int64 Enew = icvAlphaExpand( E, -alpha, state, &state2 );
            if( Enew < E )
            {
                nZeroExpansions = 0;
                E = Enew;
            }
            else if( ++nZeroExpansions >= state->numberOfDisparities )
                break;
        }
    }

    // Write results back to the caller's matrices (with type conversion).
    if( dispLeft )
        cvConvert( state->dispLeft, dispLeft );
    if( dispRight )
        cvConvert( state->dispRight, dispRight );

    __END__;

    cvFree( &state2.orphans );
}
/* Trains a linear C-SVC on HoG feature vectors.
 *
 * pos_mat/neg_mat - feature matrices (one row per sample); ignored and
 *                   replaced by the contents of pos_file/neg_file when those
 *                   are non-NULL. Ownership is taken either way: both
 *                   matrices are released before returning.
 * savexml         - if non-NULL, the trained model is saved to this path.
 * pos_file/neg_file - optional paths to cvSave'd feature matrices.
 *
 * Positive samples are labeled class 1, negative samples class 2.
 * Returns the trained CvSVM (caller owns it), or NULL if a feature file
 * could not be loaded. */
CvSVM* HoGProcessor::trainSVM(CvMat* pos_mat, CvMat* neg_mat, char *savexml, char *pos_file, char *neg_file) 
{    
	/* Read the feature vectors for positive samples */
	if (pos_file != NULL) 
	{
		printf("positive loading...\n");
		pos_mat = (CvMat*) cvLoad(pos_file);
		/* Fix: cvLoad returns NULL on failure; the original dereferenced
		   the result unchecked (and reported success regardless). */
		if (pos_mat == NULL)
		{
			printf("Unable to load positive features from %s\n", pos_file);
			return NULL;
		}
		printf("positive loaded\n");
	}

	/* Read the feature vectors for negative samples */
	if (neg_file != NULL)
	{
		neg_mat = (CvMat*) cvLoad(neg_file);
		if (neg_mat == NULL)
		{
			printf("Unable to load negative features from %s\n", neg_file);
			cvReleaseMat(&pos_mat);	/* don't leak the positive matrix */
			return NULL;
		}
		printf("negative loaded\n");
	}

	int n_positive, n_negative;
	n_positive = pos_mat->rows;
	n_negative = neg_mat->rows;
	int feature_vector_length = pos_mat->cols;
	int total_samples;
	
	total_samples = n_positive + n_negative;

	/* Stack positive rows on top of negative rows in one training matrix,
	   with a parallel column vector of class labels. */
	CvMat* trainData = cvCreateMat(total_samples, feature_vector_length, CV_32FC1);

	CvMat* trainClasses = cvCreateMat(total_samples, 1, CV_32FC1 );

	CvMat trainData1, trainData2, trainClasses1, trainClasses2;

	printf("Number of positive Samples : %d\n",
	pos_mat->rows);

	/* Copy the positive feature vectors to training data */
	cvGetRows(trainData, &trainData1, 0, n_positive);
	cvCopy(pos_mat, &trainData1);
	cvReleaseMat(&pos_mat);

	/* Copy the negative feature vectors to training data */
	cvGetRows(trainData, &trainData2, n_positive,total_samples);

	cvCopy(neg_mat, &trainData2);
	cvReleaseMat(&neg_mat);

	printf("Number of negative Samples : %d\n",	trainData2.rows);

	/* Class labels: positive samples are class 1, negative samples class 2 */
	cvGetRows(trainClasses, &trainClasses1, 0, n_positive);
	cvSet(&trainClasses1, cvScalar(1));

	cvGetRows(trainClasses, &trainClasses2, n_positive,	total_samples);
	cvSet(&trainClasses2, cvScalar(2));

	/* Train a linear support vector machine (C-SVC, C = 2, eps = 0.01).
	   The parameters may be experimented with to see their effects. */
	CvSVM* svm = new CvSVM(trainData, trainClasses, 0, 0,
	CvSVMParams(CvSVM::C_SVC, CvSVM::LINEAR, 0, 0, 0, 2,
	0, 0, 0, cvTermCriteria(CV_TERMCRIT_EPS,0, 0.01)));	

	printf("SVM Training Complete!!\n");

	/* Save the learnt model */
	if (savexml != NULL) 
	{
		svm->save(savexml);
	}

	cvReleaseMat(&trainClasses);
	cvReleaseMat(&trainData);

	return svm;
}
示例#15
0
CV_IMPL void
cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )
{
    CvMat* sum = 0;
    CvMat* sqsum = 0;
    
    CV_FUNCNAME( "cvMatchTemplate" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int depth, cn;
    int i, j, k;
    CvMat stub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat rstub, *result = (CvMat*)_result;
    CvScalar templ_mean = cvScalarAll(0);
    double templ_norm = 0, templ_sum2 = 0;
    
    int idx = 0, idx2 = 0;
    double *p0, *p1, *p2, *p3;
    double *q0, *q1, *q2, *q3;
    double inv_area;
    int sum_step, sqsum_step;
    int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                   method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    int is_normed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
    CV_CALL( templ = cvGetMat( templ, &tstub, &coi2 ));
    CV_CALL( result = cvGetMat( result, &rstub ));

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_32F )
        CV_ERROR( CV_StsUnsupportedFormat,
        "The function supports only 8u and 32f data types" );

    if( !CV_ARE_TYPES_EQ( img, templ ))
        CV_ERROR( CV_StsUnmatchedSizes, "image and template should have the same type" );

    if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "output image should have 32f type" );

    if( img->rows < templ->rows || img->cols < templ->cols )
    {
        CvMat* t;
        CV_SWAP( img, templ, t );
    }

    if( result->rows != img->rows - templ->rows + 1 ||
        result->cols != img->cols - templ->cols + 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );

    if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
        CV_ERROR( CV_StsBadArg, "unknown comparison method" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);

    /*if( is_normed && cn == 1 && templ->rows > 8 && templ->cols > 8 &&
        img->rows > templ->cols && img->cols > templ->cols )
    {
        CvTemplMatchIPPFunc ipp_func =
            depth == CV_8U ?
            (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_8u32f_C1R_p :
            method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_8u32f_C1R_p :
            (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_8u32f_C1R_p) :
            (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_32f_C1R_p :
            method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_32f_C1R_p :
            (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_32f_C1R_p);

        if( ipp_func )
        {
            CvSize img_size = cvGetMatSize(img), templ_size = cvGetMatSize(templ);

            IPPI_CALL( ipp_func( img->data.ptr, img->step ? img->step : CV_STUB_STEP,
                                 img_size, templ->data.ptr,
                                 templ->step ? templ->step : CV_STUB_STEP,
                                 templ_size, result->data.ptr,
                                 result->step ? result->step : CV_STUB_STEP ));
            for( i = 0; i < result->rows; i++ )
            {
                float* rrow = (float*)(result->data.ptr + i*result->step);
                for( j = 0; j < result->cols; j++ )
                {
                    if( fabs(rrow[j]) > 1. )
                        rrow[j] = rrow[j] < 0 ? -1.f : 1.f;
                }
            }
            EXIT;
        }
    }*/

    CV_CALL( icvCrossCorr( img, templ, result ));

    if( method == CV_TM_CCORR )
        EXIT;

    inv_area = 1./((double)templ->rows * templ->cols);

    CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1,
                                CV_MAKETYPE( CV_64F, cn )));
    if( method == CV_TM_CCOEFF )
    {
        CV_CALL( cvIntegral( img, sum, 0, 0 ));
        CV_CALL( templ_mean = cvAvg( templ ));
        q0 = q1 = q2 = q3 = 0;
    }
    else
    {
        CvScalar _templ_sdv = cvScalarAll(0);
        CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1,
                                      CV_MAKETYPE( CV_64F, cn )));
        CV_CALL( cvIntegral( img, sum, sqsum, 0 ));
        CV_CALL( cvAvgSdv( templ, &templ_mean, &_templ_sdv ));

        templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
                    CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);

        if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            cvSet( result, cvScalarAll(1.) );
            EXIT;
        }
        
        templ_sum2 = templ_norm +
                     CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
                     CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);

        if( num_type != 1 )
        {
            templ_mean = cvScalarAll(0);
            templ_norm = templ_sum2;
        }
        
        templ_sum2 /= inv_area;
        templ_norm = sqrt(templ_norm);
        templ_norm /= sqrt(inv_area); // care of accuracy here

        q0 = (double*)sqsum->data.ptr;
        q1 = q0 + templ->cols*cn;
        q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
        q3 = q2 + templ->cols*cn;
    }

    p0 = (double*)sum->data.ptr;
    p1 = p0 + templ->cols*cn;
    p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
    p3 = p2 + templ->cols*cn;

    sum_step = sum ? sum->step / sizeof(double) : 0;
    sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;

    for( i = 0; i < result->rows; i++ )
    {
        float* rrow = (float*)(result->data.ptr + i*result->step);
        idx = i * sum_step;
        idx2 = i * sqsum_step;

        for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wnd_mean2 = 0, wnd_sum2 = 0;
            
            if( num_type == 1 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wnd_mean2 += CV_SQR(t);
                    num -= t*templ_mean.val[k];
                }

                wnd_mean2 *= inv_area;
            }

            if( is_normed || num_type == 2 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wnd_sum2 += t;
                }

                if( num_type == 2 )
                    num = wnd_sum2 - 2*num + templ_sum2;
            }

            if( is_normed )
            {
                t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
                if( t > DBL_EPSILON )
                {
                    num /= t;
                    if( fabs(num) > 1. )
                        num = num > 0 ? 1 : -1;
                }
                else
                    num = method != CV_TM_SQDIFF_NORMED || num < DBL_EPSILON ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }
        
    __END__;

    cvReleaseMat( &sum );
    cvReleaseMat( &sqsum );
}
示例#16
0
/*
 * vgg_X_from_xP_nonlin
 *
 * Estimate a 3D point X (homogeneous 4x1 CvMat) from its projections in
 * K views.  A linear estimate (vgg_X_from_xP_lin) is refined by up to 10
 * Gauss-Newton iterations on the reprojection residuals.
 *
 *  u1      - measurement matrix; only the first two rows are used for the
 *            linear initialisation.
 *  P1      - array of K camera projection matrices.
 *  imsize1 - image size(s); first column holds [width; height].
 *  K       - number of views, must be >= 2.
 *
 * Returns a newly allocated CvMat* (caller releases), or 0 if K < 2.
 * The inputs are deep-copied and never modified.
 *
 * Fixes vs. the previous revision: the K<2 early return no longer leaks the
 * deep copies; `P` is delete[]d; `u_2_rows`/`imsize_col` use borrowed stack
 * headers instead of leaking cvCreateMat data; `X_T`, the pre-permutation
 * `T`, the pre-solve `Je` and the final `eprev` are all released.
 */
CvMat* vgg_X_from_xP_nonlin(CvMat* u1, CvMat** P1, CvMat* imsize1, int K)
{
	/* At least two views are required; check BEFORE any allocation so the
	   early return cannot leak. */
	if(K < 2)
	{
		printf("\n Cannot reconstruct 3D from 1 image");
		return 0;
	}

	/* Deep copies of the inputs (they are conditioned in place below). */
	CvMat* u = cvCreateMat(u1->rows, u1->cols, u1->type);
	cvCopy(u1, u);

	CvMat** P = new CvMat*[K];
	int kp;
	for(kp = 0; kp < K; kp++)
	{
		P[kp] = cvCreateMat(P1[kp]->rows, P1[kp]->cols, P1[kp]->type);
		cvCopy(P1[kp], P[kp]);
	}

	CvMat* imsize = cvCreateMat(imsize1->rows, imsize1->cols, imsize1->type);
	cvCopy(imsize1, imsize);

	CvMat** Q = new CvMat*[K];	/* Q[k] = P[k]*T, the reparametrized cameras */

	int i, mat_id;
	double lambda_min, lambda_max;

	/* Borrowed header onto the first two rows of u - no data is allocated,
	   so there is nothing extra to release. */
	CvMat u_2_rows_hdr;
	CvMat* u_2_rows = cvGetRows(u, &u_2_rows_hdr, 0, 2, 1);

	/* Linear (DLT) initial estimate. */
	CvMat* X = vgg_X_from_xP_lin(u_2_rows, P, K, imsize);

	/* Conditioning scale f = 4 / (width + height). */
	CvMat imsize_col_hdr;
	CvMat* imsize_col = cvGetCol(imsize, &imsize_col_hdr, 0);
	CvScalar f = cvSum(imsize_col);
	f.val[0] = 4 / f.val[0];

	/* Conditioning homography H = [f 0 -f*w/2; 0 f -f*h/2; 0 0 1]. */
	CvMat* H = cvCreateMat(3, 3, CV_64FC1);
	H->data.db[0] = f.val[0];
	H->data.db[1] = 0;
	H->data.db[2] = ((-1) * f.val[0] * cvmGet(imsize, 0, 0)) / 2;
	H->data.db[3] = 0;
	H->data.db[4] = f.val[0];
	H->data.db[5] = ((-1) * f.val[0] * cvmGet(imsize, 1, 0)) / 2;
	H->data.db[6] = 0;
	H->data.db[7] = 0;
	H->data.db[8] = 1;

	/* Condition cameras and measurements in place: P <- H*P, u <- H*u. */
	for(mat_id = 0; mat_id < K; mat_id++)
	{
		cvMatMul(H, P[mat_id], P[mat_id]);
	}
	cvMatMul(H, u, u);

	/* Parametrize X such that X = T*[Y;1]; thus x = P*T*[Y;1] = Q*[Y;1].
	   T comes from the SVD of X' (X_T = U*W*T'). */
	CvMat* X_T = cvCreateMat(X->cols, X->rows, CV_64FC1);
	CvMat* W = cvCreateMat(X_T->rows, X_T->cols, CV_64FC1);
	CvMat* U = cvCreateMat(X_T->rows, X_T->rows, CV_64FC1);
	CvMat* T = cvCreateMat(X_T->cols, X_T->cols, CV_64FC1);
	cvTranspose(X, X_T);
	cvSVD(X_T, W, U, T);
	cvReleaseMat(&W);
	cvReleaseMat(&X_T);	/* only needed for the SVD */

	/* T = T(:,[2:end 1]) - rotate the first column to the back.
	   NOTE(review): sfmGetCols/sfmAlignMatH are assumed to return newly
	   allocated matrices (the old code overwrote pre-allocated ones,
	   leaking them) - confirm against their definitions. */
	CvMat* T_2_cols = sfmGetCols(T, 0, 0);
	CvMat* T_rest_cols = sfmGetCols(T, 1, T->cols - 1);
	{
		CvMat* T_perm = sfmAlignMatH(T_rest_cols, T_2_cols);
		cvReleaseMat(&T);	/* release the SVD's V before rebinding */
		T = T_perm;
	}

	for(mat_id = 0; mat_id < K; mat_id++)
	{
		Q[mat_id] = cvCreateMat(P[mat_id]->rows, T->cols, CV_64FC1);
		cvMatMul(P[mat_id], T, Q[mat_id]);
	}

	/* Newton refinement over the 3-vector Y, with X = T*[Y;1]. */
	CvMat* Y = cvCreateMat(3, 1, CV_64FC1);
	cvSetZero(Y); /* Y = [0;0;0] */

	/* Previous residual norm starts at infinity so the first iteration
	   always proceeds. */
	CvScalar inf;
	inf.val[0] = INF;
	inf.val[1] = INF;
	inf.val[2] = INF;
	inf.val[3] = INF;
	CvMat* eprev = cvCreateMat(1, 1, CV_64FC1);
	cvSet(eprev, inf, 0);

	for(i = 0; i < 10; i++)
	{
		int pass;
		double RCondVal;

		CvMat* e = cvCreateMat(2 * K, 1, CV_64FC1);
		CvMat* J = cvCreateMat(2 * K, 3, CV_64FC1);

		/* Residuals e and Jacobian J at the current Y. */
		pass = resid(Y, u, Q, K, e, J);
		(void)pass;

		CvMat* J_tr = cvCreateMat(J->cols, J->rows, CV_64FC1);
		cvTranspose(J, J_tr);

		CvMat* JtJ = cvCreateMat(J->cols, J->cols, CV_64FC1);
		cvMatMul(J_tr, J, JtJ);

		/* Singular values of J'J give its reciprocal condition number. */
		W = cvCreateMat(J->cols, J->cols, CV_64FC1);
		cvSVD(JtJ, W);
		lambda_max = W->data.db[0];
		lambda_min = W->data.db[(W->rows * W->cols) - 1];
		RCondVal = lambda_min / lambda_max;

		/* Stop when the residual stops shrinking, or J'J is too close to
		   singular for the step to be trustworthy. */
		if(1 - (cvNorm(e, 0, CV_L2, 0) / cvNorm(eprev, 0, CV_L2, 0)) < 1000 * EPS ||
		   RCondVal < 10 * EPS)
		{
			cvReleaseMat(&J);
			cvReleaseMat(&e);
			cvReleaseMat(&J_tr);
			cvReleaseMat(&JtJ);
			cvReleaseMat(&W);
			break;
		}

		cvReleaseMat(&eprev);
		eprev = cvCreateMat(e->rows, e->cols, CV_64FC1);
		cvCopy(e, eprev);

		/* Newton step: Y <- Y - (J'J)^{-1} (J'e). */
		CvMat* Je = cvCreateMat(J->cols, e->cols, CV_64FC1);
		cvMatMul(J_tr, e, Je); /* (J'*e) */
		cvInvert(JtJ, JtJ);
		{
			CvMat* step = sfmMatMul(JtJ, Je); /* (J'*J)\(J'*e) */
			cvReleaseMat(&Je);	/* sfmMatMul returns a new matrix */
			Je = step;
		}
		cvSub(Y, Je, Y, 0);

		cvReleaseMat(&J);
		cvReleaseMat(&e);
		cvReleaseMat(&J_tr);
		cvReleaseMat(&JtJ);
		cvReleaseMat(&Je);
		cvReleaseMat(&W);
	}

	/* Map back: X = T * [Y; 1]. */
	CvMat* Y_new = cvCreateMat(4, 1, CV_64FC1);
	PutMatV(Y, Y_new, 0);
	Y_new->data.db[3] = 1;
	cvMatMul(T, Y_new, X);

	/* Cleanup - everything except X, which the caller owns. */
	cvReleaseMat(&H);
	cvReleaseMat(&U);
	cvReleaseMat(&T);
	cvReleaseMat(&Y);
	cvReleaseMat(&Y_new);
	cvReleaseMat(&eprev);
	cvReleaseMat(&T_2_cols);
	cvReleaseMat(&T_rest_cols);
	for(kp = 0; kp < K; kp++)
	{
		cvReleaseMat(&P[kp]);
	}
	delete [] P;
	cvReleaseMat(&u);
	cvReleaseMat(&imsize);
	cvReleaseMatGrp(Q, K);	/* NOTE(review): assumed to release each Q[k];
				   confirm whether it also frees the array itself. */

	return X;
}
示例#17
0
// Fill every element of `mat` with the given color.  The plain double is
// expanded to a CvScalar that matches the array's element type before the
// fill, so packed color codes work for multi-channel arrays too.
void cvFillImage( CvArr* mat, double color )
{
    const int elem_type = cvGetElemType( mat );
    const CvScalar fill_value = cvColorToScalar( color, elem_type );
    cvSet( mat, fill_value, 0 );
}
示例#18
0
// Demo driver for MT_Cholesky: factor a 3x3 matrix through three stages
// (identity, scaled diagonal, scaled diagonal + 0.1 off-diagonals), then a
// 5x5 matrix, and finally demonstrate compositing a matrix via sub-rect
// headers.  Results are printed with disp_mat.
int main(int argc, char** argv)
{
    // --- 3x3: start from the identity ---
    CvMat* M = cvCreateMat(3, 3, CV_64FC1);
    cvSetIdentity(M);

    CvMat* L = MT_CreateCholeskyResult(M);
    MT_Cholesky(M, L);
    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    // scale the diagonal: diag(1, 4, 9)
    cvmSet(M, 1, 1, 4.0);
    cvmSet(M, 2, 2, 9.0);
    MT_Cholesky(M, L);
    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    // set every off-diagonal entry to 0.1
    for (int r = 0; r < 3; r++)
    {
        for (int c = 0; c < 3; c++)
        {
            if (r != c)
            {
                cvmSet(M, r, c, 0.1);
            }
        }
    }
    MT_Cholesky(M, L);
    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvReleaseMat(&M);
    cvReleaseMat(&L);

    // --- 5x5: 0.1 everywhere, diagonal 1..5 ---
    M = cvCreateMat(5, 5, CV_64FC1);
    cvSet(M, cvScalar(0.1));
    for (unsigned int d = 0; d < 5; d++)
    {
        cvmSet(M, d, d, (double) (d + 1));
    }

    L = MT_CreateCholeskyResult(M);
    MT_Cholesky(M, L);
    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvReleaseMat(&M);
    cvReleaseMat(&L);

    // --- composite a 4x4 matrix through two 2x2 sub-rect headers ---
    CvMat* C = cvCreateMat(4, 4, CV_64FC1);
    CvMat* A = cvCreateMatHeader(2, 2, CV_64FC1);
    CvMat* B = cvCreateMatHeader(2, 2, CV_64FC1);

    // A and B are views into C's top-left and bottom-right blocks
    cvGetSubRect(C, A, cvRect(0, 0, 2, 2));
    cvGetSubRect(C, B, cvRect(2, 2, 2, 2));

    cvSet(C, cvScalar(0));
    cvSet(A, cvScalar(1));
    cvSet(B, cvScalar(2));

    // the headers can go as soon as the blocks are written
    cvReleaseMat(&A);
    cvReleaseMat(&B);

    disp_mat(C, "C (composited)");

    cvReleaseMat(&C);
}
示例#19
0
// COPY RGBA, DEPTH, FEATURE FILES INTO THE DB
// IF NO FEATURE AVAILABLE, DERIVE IT
//
// filename       - source path prefix (no extension)
// DBFolder       - database root folder
// categoryFolder - category sub-folder inside the database
//
// Returns 0 on success, 1 if the image file is missing, 3 on a copy failure.
// A missing depth file is tolerated; missing mask/feature files are derived.
int Insert(
		char* filename,
		char* DBFolder,
		char* categoryFolder
		){

	char srcpath[FLEN], srcfile[FLEN], dstpath[FLEN], dstfile[FLEN];
	strcpy(srcpath, filename);

	// Base name: text after the last '/'.  Check the strrchr() result
	// BEFORE advancing past the separator - the previous code computed
	// strrchr(...)+1 first, which made the NULL test dead and produced an
	// invalid pointer for paths without any '/'.
	char *pch = strrchr(filename, '/');
	if (pch==NULL){
		pch = filename;
	}else{
		pch++;
	}
	strcpy(dstpath, DBFolder);
	strcat(dstpath, "/");
	strcat(dstpath, categoryFolder);
	strcat(dstpath, "/");
	strcat(dstpath, pch);
	// NOTE(review): the strcpy/strcat chain assumes every composed path
	// fits in FLEN; switch to snprintf if inputs can be untrusted.

	//read and copy image file (mandatory)
	strcpy(srcfile, srcpath);
	strcat(srcfile, IMG_EXT);

	if (Exists(srcfile)==0){
		fprintf(stderr, "Image file does not exists!\n");
		return 1;
	}else{
		strcpy(dstfile, dstpath);
		strcat(dstfile, IMG_EXT);

		if (FileCopy(srcfile, dstfile)>0){
			fprintf(stderr, "FileCopy Failed!\n");
			return 3;
		}
	}

	//read and copy depth file (optional: a warning only)
	strcpy(srcfile, srcpath);
	strcat(srcfile, DEPTH_EXT);

	if (Exists(srcfile)==0){
		fprintf(stderr, "Depth file does not exists!\n");
		//return 2;
	}else{
		strcpy(dstfile, dstpath);
		strcat(dstfile, DEPTH_EXT);

		if (FileCopy(srcfile, dstfile)>0){
			fprintf(stderr, "FileCopy Failed!\n");
			return 3;
		}
	}

	//read and copy mask file
	strcpy(srcfile, srcpath);
	strcat(srcfile, MASK_EXT);
	strcpy(dstfile, dstpath);
	strcat(dstfile, MASK_EXT);
	if (Exists(srcfile)==0){//if not exist, create a new mask covering the whole image
		strcpy(srcfile, srcpath);
		strcat(srcfile, IMG_EXT);
		IplImage* image = cvLoadImage(srcfile, CV_LOAD_IMAGE_COLOR);
		if (image==NULL){
			fprintf(stderr, "Failed to Load an image file\n");
		}
		// all-255 mask == whole image is foreground
		IplImage* mask = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U,1);
		cvSet(mask, cvScalarAll(255), NULL);
		cvSaveImage(dstfile, mask, NULL);
		cvReleaseImage(&image);
		cvReleaseImage(&mask);
	}else{
		if (FileCopy(srcfile, dstfile)>0){
			fprintf(stderr, "FileCopy Failed!\n");
			return 3;
		}
	}

	//read and copy feature file
	strcpy(srcfile, srcpath);
	strcat(srcfile, FTR_EXT);

	if (Exists(srcfile)==0){//if not exist, derive the feature from the copied files
		int retCode = MakeFeature(dstpath);
		if (retCode){
			fprintf(stderr, "Failed to create feature\n");
		}
	}else{
		strcpy(dstfile, dstpath);
		strcat(dstfile, FTR_EXT);
		if (FileCopy(srcfile, dstfile)>0){
			fprintf(stderr, "FileCopy Failed!\n");
			return 3;
		}
	}

	return 0;
}
示例#20
0
static bool read_layer_pixels(
	FILE* fp,
	PsdLayerData* pLayerData,
	ImgFile_Ptr pFile,
	ImgLayerGroup_Ptr pLayerGroup)
{
	int i,j;

	//create new layer	
	AddNewLayerHandle* handle = (AddNewLayerHandle*)pFile->CreateImgFileHandle(IFH_ADD_NEW_LAYER );
	handle->SetNewLayerRect(&(pLayerData->rect));
	handle->Do( pFile );
	ImgLayer_Ptr new_layer = handle->GetNewLayer().lock();
	pFile->ReleaseImgFileHandle(handle);
	handle = NULL;

	//set layer synthesizer
	new_layer->SetLayerSynthFourCC( pLayerData->blend_fourcc );

	//set layer opacity
	new_layer->SetOpacity( pLayerData->opacity );

	pFile->SetSelectLayer( new_layer );

	//
	new_layer->SetName( pLayerData->name );
	new_layer->ExtendLayer(); //不足分があれば拡張

	IplImage* channels[4];
	CvSize size = cvSize(
		pLayerData->rect.right - pLayerData->rect.left,
		pLayerData->rect.bottom - pLayerData->rect.top);
	for(i=0; i<4; i++){
		channels[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
	}
	
	int c_width  = pLayerData->rect.right - pLayerData->rect.left;
	int c_height = pLayerData->rect.bottom - pLayerData->rect.top; 
	for(i=0; i<pLayerData->channel_num; i++){
		int cn=0;
		int16_t id = pLayerData->psd_channels[i].id;

		if(id == PSD_CHANNEL_MASK){
			read_mask_pixels(fp, pLayerData, pFile, pLayerGroup);
			pLayerData->is_mask = false;
			break;
		}

		switch(id){
			case PSD_CHANNEL_RED:
				cn = 2;
				break;
			case PSD_CHANNEL_GREEN:
				cn = 1;
				break;
			case PSD_CHANNEL_BLUE:
				cn = 0;
				break;
			case PSD_CHANNEL_ALPHA:
				cn = 3;
				break;
		}

		if(!read_channel_pixels(
			fp,
			channels[cn],
			c_width,
			c_height,
			pLayerData->depth,
			false))
		{
			return false;
		}
	}
	if(pLayerData->channel_num == 3){
		//set alpha channel
		cvSet(channels[3], cvScalar(255));
	}

	new_layer->Merge(
		pLayerData->rect.left,
		pLayerData->rect.top,
		pLayerData->rect.right - pLayerData->rect.left,
		pLayerData->rect.bottom - pLayerData->rect.top,
		channels[0],
		channels[1],
		channels[2],
		channels[3]);

	LPUPDATE_DATA pData = new_layer->CreateUpdateData();
	pData->isAll = true;
	new_layer->PushUpdateData( pData );

	cvReleaseImage( &channels[0] );
	cvReleaseImage( &channels[1] );
	cvReleaseImage( &channels[2] );
	cvReleaseImage( &channels[3] );

	return true;
}
示例#21
0
// Parse a variable-types specification string and fill `var_types`.
// Accepted forms: "ord" (all ordered), "cat" (all categorical), or index
// lists such as "ord[0-5,7]cat[6,8-9]" selecting columns of `values`.
// Every variable must be assigned exactly once, otherwise CV_StsBadArg is
// raised; assigning CV_VAR_ORDERED to an already-categorical variable is
// also an error.
// NOTE(review): the two early `return`s below leave without passing through
// __END__ - harmless here (no cleanup code), but confirm it matches the
// CV_FUNCNAME/__BEGIN__ conventions used elsewhere in the file.
void CvMLData::set_var_types( const char* str )
{
    CV_FUNCNAME( "CvMLData::set_var_types" );
    __BEGIN__;

    const char* ord = 0, *cat = 0;
    int var_count = 0, set_var_type_count = 0;
    // requires that the CSV data has already been read
    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );

    var_count = values->cols;

    assert( var_types );
 
    // locate the "ord" and "cat" sections inside the spec string
    ord = strstr( str, "ord" );
    cat = strstr( str, "cat" );    
    if ( !ord && !cat )
        CV_ERROR( CV_StsBadArg, "types string is not correct" );
    
    // shortcut: the whole string is exactly "cat" -> everything categorical
    if ( !ord && strlen(cat) == 3 ) // str == "cat"
    {
        cvSet( var_types, cvScalarAll(CV_VAR_CATEGORICAL) );
        return;
    }

    // shortcut: the whole string is exactly "ord" -> everything ordered
    if ( !cat && strlen(ord) == 3 ) // str == "ord"
    {
        cvSet( var_types, cvScalarAll(CV_VAR_ORDERED) );
        return;
    }

    if ( ord ) // parse ord str
    {
        char* stopstring = NULL;            
        if ( ord[3] != '[')
            CV_ERROR( CV_StsBadArg, "types string is not correct" );
        
        ord += 4; // pass "ord["
        do
        {
            // read the next index; strtod also advances `stopstring` to the
            // delimiter that follows it (',', ']' or '-')
            int b1 = (int)strtod( ord, &stopstring );
            if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') )
                CV_ERROR( CV_StsBadArg, "types string is not correct" );
            ord = stopstring + 1;
            if ( (stopstring[0] == ',') || (stopstring[0] == ']'))
            {
                // single index
                if ( var_types->data.ptr[b1] == CV_VAR_CATEGORICAL)
                    CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" );
                var_types->data.ptr[b1] = CV_VAR_ORDERED;
                set_var_type_count++;
            }
            else 
            {
                if ( stopstring[0] == '-') 
                {
                    // inclusive range "b1-b2"
                    int b2 = (int)strtod( ord, &stopstring);
                    if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') )
                        CV_ERROR( CV_StsBadArg, "types string is not correct" );           
                    ord = stopstring + 1;
                    for (int i = b1; i <= b2; i++)
                    {
                        if ( var_types->data.ptr[i] == CV_VAR_CATEGORICAL)
                            CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" );                
                        var_types->data.ptr[i] = CV_VAR_ORDERED;
                    }
                    set_var_type_count += b2 - b1 + 1;
                }
                else
                    CV_ERROR( CV_StsBadArg, "types string is not correct" );

            }
        }
        while (*stopstring != ']');

        // after ']' only end-of-string or a comma (before the next section)
        // is allowed
        if ( stopstring[1] != '\0' && stopstring[1] != ',')
            CV_ERROR( CV_StsBadArg, "types string is not correct" );
    }    

    if ( cat ) // parse cat str
    {
        char* stopstring = NULL;            
        if ( cat[3] != '[')
            CV_ERROR( CV_StsBadArg, "types string is not correct" );
        
        cat += 4; // pass "cat["
        do
        {
            // same index/range grammar as the "ord" section, but marking
            // categorical never conflicts with a previous assignment
            int b1 = (int)strtod( cat, &stopstring );
            if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') )
                CV_ERROR( CV_StsBadArg, "types string is not correct" );
            cat = stopstring + 1;
            if ( (stopstring[0] == ',') || (stopstring[0] == ']'))
            {
                var_types->data.ptr[b1] = CV_VAR_CATEGORICAL;
                set_var_type_count++;
            }
            else 
            {
                if ( stopstring[0] == '-') 
                {
                    // inclusive range "b1-b2"
                    int b2 = (int)strtod( cat, &stopstring);
                    if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') )
                        CV_ERROR( CV_StsBadArg, "types string is not correct" );           
                    cat = stopstring + 1;
                    for (int i = b1; i <= b2; i++)
                        var_types->data.ptr[i] = CV_VAR_CATEGORICAL;
                    set_var_type_count += b2 - b1 + 1;
                }
                else
                    CV_ERROR( CV_StsBadArg, "types string is not correct" );

            }
        }
        while (*stopstring != ']');

        if ( stopstring[1] != '\0' && stopstring[1] != ',')
            CV_ERROR( CV_StsBadArg, "types string is not correct" );
    }    

    // every variable must have received a type exactly once
    if (set_var_type_count != var_count)
        CV_ERROR( CV_StsBadArg, "types string is not correct" );

     __END__;
}
示例#22
0
static bool read_psd_merged_image(
	FILE* fp,
	ImgFile_Ptr pFile,
	PsdHeader* header)
{
	int i,j,k;

	AddNewLayerHandle* handle = (AddNewLayerHandle*)pFile->CreateImgFileHandle(IFH_ADD_NEW_LAYER );
	handle->Do( pFile );
	ImgLayer_Ptr new_layer = handle->GetNewLayer().lock();
	pFile->ReleaseImgFileHandle(handle);
	handle = NULL;

	pFile->SetSelectLayer( new_layer );

	IplImage* channels[4];
	for(i=0; i<4; i++){
		channels[i] = cvCreateImage(cvSize(header->col, header->row), IPL_DEPTH_8U, 1);
	}

	int bytes_per_pixel = header->depth / 8;
	int bytes_per_row = header->col * bytes_per_pixel;
	int total_bytes = bytes_per_row * header->row;

	int extra_channels;

	switch(header->mode){
		case PSD_COLOR_MODE_RGB:
			extra_channels = header->channels - 3;
	}

	uint16_t** rle_pack_len = (uint16_t**) malloc(sizeof(uint16_t*) * header->channels);
	memset(rle_pack_len, 0, sizeof(uint16_t*) * header->channels);

	uint16_t compression = read_2byte_BE(fp);
	switch(compression){
		case PSD_CM_NONE:
			{
				for(i=0; i<header->channels; i++){
					uint8_t* dst = GetPixelAddress(channels[i], 0, 0);
					if(!fread(dst, total_bytes, 1, fp)){
						return false;
					}
				}
			}
			break;
		case PSD_CM_RLE:
			{
				for(i=0; i<header->channels; i++){
					rle_pack_len[i] = (uint16_t*) malloc(sizeof(uint16_t) * header->row);
					for(j=0; j<header->row; j++){
						rle_pack_len[i][j] = read_2byte_BE(fp);
					}
				}

				for(i=0; i<header->channels; i++){
					for(j=0; j<header->row; j++){
						uint8_t* src = (uint8_t*) malloc(rle_pack_len[i][j]);
						uint8_t* dst = GetPixelAddress(channels[i], 0, j);

						if(!fread(src, rle_pack_len[i][j], 1, fp)){
							return false;
						}

						decode_rle((int8_t*)src, rle_pack_len[i][j], bytes_per_row, (int8_t*)dst);

						free(src);
					}
				}
			}
			break;
	}

	if(rle_pack_len){
		for(i=0; i<header->channels; i++){
			if(rle_pack_len[i]){
				free(rle_pack_len[i]);
			}
		}
		free(rle_pack_len);
	}

	if(header->channels == 3){
		//set alpha channel
		cvSet(channels[3], cvScalar(255));
	}

	new_layer->Merge(
		0, 0,
		header->col,
		header->row,
		channels[2],
		channels[1],
		channels[0],
		channels[3]);

	LPUPDATE_DATA pData = new_layer->CreateUpdateData();
	pData->isAll = true;
	new_layer->PushUpdateData( pData );

	//
	cvReleaseImage( &channels[0] );
	cvReleaseImage( &channels[1] );
	cvReleaseImage( &channels[2] );
	cvReleaseImage( &channels[3] );

	uint32_t now = ftell(fp);
	fseek(fp, 0, SEEK_END);
	uint32_t end = ftell(fp);

	return true;
}
示例#23
0
void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
  const int l_num_cost_pts = 200;

  float l_thres = 4;

  IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
  cvSet(lp_mask, cvRealScalar(0));

  std::vector<CvPoint> l_chain_vector;

  float l_chain_length = 0;
  float * lp_seg_length = new float[a_chain.size()];

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
    lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
    l_chain_length += lp_seg_length[l_i];
  }
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    if (lp_seg_length[l_i] > 0)
    {
      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
      float l_cur_len = lp_seg_length[l_i] / l_cur_num;

      for (int l_j = 0; l_j < l_cur_num; ++l_j)
      {
        float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

        CvPoint l_pts;

        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

        l_chain_vector.push_back(l_pts);
      }
    }
  }
  std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
    lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
    lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
    //CV_IMAGE_ELEM(lp_mask,unsigned char,(int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X)=255;
  }
  //cv_show_image(lp_mask,"hallo2");

  reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);

  CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
  CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
  CvMat * lp_w = cvCreateMat(4, 1, CV_32F);

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
  }
  cvSVD(lp_pts, lp_w, 0, lp_v);

  float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
                  CV_MAT_ELEM(*lp_v, float, 1, 3),
                  CV_MAT_ELEM(*lp_v, float, 2, 3),
                  CV_MAT_ELEM(*lp_v, float, 3, 3)};
示例#24
0
// Train the MLP with the RPROP (resilient backpropagation) algorithm.
//   x0 - input samples (float or double rows)
//   u  - desired outputs, same row count
//   sw - optional per-sample weights; NULL means uniform 1/count
// Returns the number of iterations performed (-1 if setup failed before the
// loop ran).  Termination: |E_prev - E| < term_crit.epsilon, or max_iter.
int CvANN_MLP::train_rprop( CvVectors x0, CvVectors u, const double* sw )
{
    const int max_buf_sz = 1 << 16;
    CvMat* dw = 0;               // per-weight RPROP step sizes
    CvMat* dEdw = 0;             // accumulated error gradient dE/dw
    CvMat* prev_dEdw_sign = 0;   // sign of the previous iteration's gradient
    CvMat* buf = 0;              // scratch for activations/derivatives
    double **x = 0, **df = 0;    // per-layer activations and f'(y) values
    int iter = -1, count = x0.count;
   
    CV_FUNCNAME( "CvANN_MLP::train" );

    __BEGIN__;

    int i, ivcount, ovcount, l_count, total = 0, max_iter, buf_sz, dcount0, dcount=0;
    double *buf_ptr;
    double prev_E = DBL_MAX*0.5, epsilon;
    double dw_plus, dw_minus, dw_min, dw_max;
    double inv_count;

    // RPROP hyper-parameters from `params`
    max_iter = params.term_crit.max_iter;
    epsilon = params.term_crit.epsilon;
    dw_plus = params.rp_dw_plus;
    dw_minus = params.rp_dw_minus;
    dw_min = params.rp_dw_min;
    dw_max = params.rp_dw_max;

    l_count = layer_sizes->cols;            // number of layers
    ivcount = layer_sizes->data.i[0];       // input vector size
    ovcount = layer_sizes->data.i[l_count-1]; // output vector size

    // allocate buffers
    for( i = 0; i < l_count; i++ )
        total += layer_sizes->data.i[i];

    // dw starts at the uniform initial step rp_dw0; dEdw and the previous
    // gradient signs start at zero
    CV_CALL( dw = cvCreateMat( wbuf->rows, wbuf->cols, wbuf->type ));
    cvSet( dw, cvScalarAll(params.rp_dw0) );
    CV_CALL( dEdw = cvCreateMat( wbuf->rows, wbuf->cols, wbuf->type ));
    cvZero( dEdw );
    CV_CALL( prev_dEdw_sign = cvCreateMat( wbuf->rows, wbuf->cols, CV_8SC1 ));
    cvZero( prev_dEdw_sign );

    // mini-batch size dcount0: as many samples as fit into max_buf_sz,
    // clamped to [1, count]
    inv_count = 1./count;
    dcount0 = max_buf_sz/(2*total);
    dcount0 = MAX( dcount0, 1 );
    dcount0 = MIN( dcount0, count );
    buf_sz = dcount0*(total + max_count)*2;

    CV_CALL( buf = cvCreateMat( 1, buf_sz, CV_64F ));

    // x[i] / df[i] point into `buf`: activations and derivatives for layer
    // i, dcount0 samples wide
    CV_CALL( x = (double**)cvAlloc( total*2*sizeof(x[0]) ));
    df = x + total;
    buf_ptr = buf->data.db;

    for( i = 0; i < l_count; i++ )
    {
        x[i] = buf_ptr;
        df[i] = x[i] + layer_sizes->data.i[i]*dcount0;
        buf_ptr += (df[i] - x[i])*2;
    }

    // run rprop loop
    /*
        y_i(t) = w_i(t)*x_{i-1}(t)
        x_i(t) = f(y_i(t))
        E = sum_over_all_samples(1/2*||u - x_N||^2)
        grad_N = (x_N - u)*f'(y_i)

                      MIN(dw_i{jk}(t)*dw_plus, dw_max), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) > 0
        dw_i{jk}(t) = MAX(dw_i{jk}(t)*dw_minus, dw_min), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0
                      dw_i{jk}(t-1) else

        if (dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0)
           dE/dw_i{jk}(t)<-0
        else
           w_i{jk}(t+1) = w_i{jk}(t) + dw_i{jk}(t)
        grad_{i-1}(t) = w_i^t(t)*grad_i(t)
    */
    for( iter = 0; iter < max_iter; iter++ )
    {
        int n1, n2, si, j, k;
        double* w;
        CvMat _w, _dEdw, hdr1, hdr2, ghdr1, ghdr2, _df;
        CvMat *x1, *x2, *grad1, *grad2, *temp;
        double E = 0;

        // first, iterate through all the samples and compute dEdw
        for( si = 0; si < count; si += dcount )
        {
            dcount = MIN( count - si, dcount0 );
            w = weights[0];
            grad1 = &ghdr1; grad2 = &ghdr2;
            x1 = &hdr1; x2 = &hdr2;

            // grab and preprocess input data:
            // weights[0] holds per-input (scale, shift) pairs, applied as
            // x = raw*scale + shift (float and double sample variants)
            if( x0.type == CV_32F )
                for( i = 0; i < dcount; i++ )
                {
                    const float* x0data = x0.data.fl[si+i];
                    double* xdata = x[0]+i*ivcount;
                    for( j = 0; j < ivcount; j++ )
                        xdata[j] = x0data[j]*w[j*2] + w[j*2+1];
                }
            else
                for( i = 0; i < dcount; i++ )
                {
                    const double* x0data = x0.data.db[si+i];
                    double* xdata = x[0]+i*ivcount;
                    for( j = 0; j < ivcount; j++ )
                        xdata[j] = x0data[j]*w[j*2] + w[j*2+1];
                }

            cvInitMatHeader( x1, dcount, ivcount, CV_64F, x[0] );

            // forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i])
            for( i = 1; i < l_count; i++ )
            {
                cvInitMatHeader( x2, dcount, layer_sizes->data.i[i], CV_64F, x[i] );
                cvInitMatHeader( &_w, x1->cols, x2->cols, CV_64F, weights[i] );
                cvGEMM( x1, &_w, 1, 0, 0, x2 );
                _df = *x2;
                _df.data.db = df[i];
                // the extra row appended to _w holds the bias terms
                calc_activ_func_deriv( x2, &_df, _w.data.db + _w.rows*_w.cols );
                CV_SWAP( x1, x2, temp );
            }

            cvInitMatHeader( grad1, dcount, ovcount, CV_64F, buf_ptr );
            // weights[l_count+1] holds per-output (scale, shift) pairs used
            // to normalize the target values below
            w = weights[l_count+1];
            grad2->data.db = buf_ptr + max_count*dcount;

            // calculate error: E += sw_i * ||u_i - x_N||^2, and seed the
            // output-layer gradient (float and double target variants)
            if( u.type == CV_32F )
                for( i = 0; i < dcount; i++ )
                {
                    const float* udata = u.data.fl[si+i];
                    const double* xdata = x[l_count-1] + i*ovcount;
                    double* gdata = grad1->data.db + i*ovcount;
                    double sweight = sw ? sw[si+i] : inv_count, E1 = 0;

                    for( j = 0; j < ovcount; j++ )
                    {
                        double t = udata[j]*w[j*2] + w[j*2+1] - xdata[j];
                        gdata[j] = t*sweight;
                        E1 += t*t;
                    }
                    E += sweight*E1;
                }
            else
                for( i = 0; i < dcount; i++ )
                {
                    const double* udata = u.data.db[si+i];
                    const double* xdata = x[l_count-1] + i*ovcount;
                    double* gdata = grad1->data.db + i*ovcount;
                    double sweight = sw ? sw[si+i] : inv_count, E1 = 0;

                    for( j = 0; j < ovcount; j++ )
                    {
                        double t = udata[j]*w[j*2] + w[j*2+1] - xdata[j];
                        gdata[j] = t*sweight;
                        E1 += t*t;
                    }
                    E += sweight*E1;
                }

            // backward pass, update dEdw            
            for( i = l_count-1; i > 0; i-- )
            {
                n1 = layer_sizes->data.i[i-1]; n2 = layer_sizes->data.i[i];
                // grad *= f'(y) for this layer
                cvInitMatHeader( &_df, dcount, n2, CV_64F, df[i] );
                cvMul( grad1, &_df, grad1 );
                // dEdw_i += x_{i-1}' * grad (accumulated over mini-batches)
                cvInitMatHeader( &_dEdw, n1, n2, CV_64F, dEdw->data.db+(weights[i]-weights[0]) );
                cvInitMatHeader( x1, dcount, n1, CV_64F, x[i-1] );
                cvGEMM( x1, grad1, 1, &_dEdw, 1, &_dEdw, CV_GEMM_A_T );
                // update bias part of dEdw
                for( k = 0; k < dcount; k++ )
                {
                    double* dst = _dEdw.data.db + n1*n2;
                    const double* src = grad1->data.db + k*n2;
                    for( j = 0; j < n2; j++ )
                        dst[j] += src[j];
                }
                cvInitMatHeader( &_w, n1, n2, CV_64F, weights[i] );
                cvInitMatHeader( grad2, dcount, n1, CV_64F, grad2->data.db );

                // propagate gradient to the previous layer (not needed for
                // the input layer)
                if( i > 1 )
                    cvGEMM( grad1, &_w, 1, 0, 0, grad2, CV_GEMM_B_T );
                CV_SWAP( grad1, grad2, temp );
            }
        }

        // now update weights per the RPROP rule (see block comment above):
        // grow the step when the gradient keeps its sign, shrink it (and
        // skip the update) when the sign flips
        for( i = 1; i < l_count; i++ )
        {
            n1 = layer_sizes->data.i[i-1]; n2 = layer_sizes->data.i[i];
            for( k = 0; k <= n1; k++ )    // k == n1 is the bias row
            {
                double* wk = weights[i]+k*n2;
                size_t delta = wk - weights[0];
                double* dwk = dw->data.db + delta;
                double* dEdwk = dEdw->data.db + delta;
                char* prevEk = (char*)(prev_dEdw_sign->data.ptr + delta);

                for( j = 0; j < n2; j++ )
                {
                    double Eval = dEdwk[j];
                    double dval = dwk[j];
                    double wval = wk[j];
                    int s = CV_SIGN(Eval);
                    int ss = prevEk[j]*s;
                    if( ss > 0 )
                    {
                        // same sign as last time: accelerate
                        dval *= dw_plus;
                        dval = MIN( dval, dw_max );
                        dwk[j] = dval;
                        wk[j] = wval + dval*s;
                    }
                    else if( ss < 0 )
                    {
                        // sign flipped: shrink step, reset stored sign
                        dval *= dw_minus;
                        dval = MAX( dval, dw_min );
                        prevEk[j] = 0;
                        dwk[j] = dval;
                        wk[j] = wval + dval*s;
                    }
                    else
                    {
                        // no previous sign information: plain step
                        prevEk[j] = (char)s;
                        wk[j] = wval + dval*s;
                    }
                    dEdwk[j] = 0.;    // reset accumulator for next iteration
                }
            }
        }

        // converged when the total error stops changing
        if( fabs(prev_E - E) < epsilon )
            break;
        prev_E = E;
        E = 0;
    }

    __END__;

    cvReleaseMat( &dw );
    cvReleaseMat( &dEdw );
    cvReleaseMat( &prev_dEdw_sign );
    cvReleaseMat( &buf );
    cvFree( &x );

    return iter;
}
示例#25
0
void mvWatershedFilter::watershed_generate_markers_internal (IplImage* src, int method, std::vector<CvPoint>* seed_vector) {
// This function generates a bunch of markers and puts them into color_point_vector
    // Seed generation for the watershed transform. Two strategies, selected by
    // `method`: random sampling of "non-edge" pixels, or a regular grid walk
    // keeping only points whose color differs enough from the last kept point.
    // Nearby, similarly colored seeds are then merged to thin the list.
    //
    // NOTE(review): the `seed_vector` parameter is never used in this body —
    // results go into the member `color_point_vector` instead; confirm callers
    // expect that.
    // massively downsample - this smoothes the image
    cvCvtColor (src, scratch_image, CV_BGR2GRAY);
    // The block below (edge-based non-edge mask generation) is disabled;
    // kept for reference.
    /*cvResize (scratch_image, ds_image_nonedge, CV_INTER_LINEAR);
      
    // generate the "nonedge image" which is 1 if the pixel isnt an edge image in ds_image_3c
    cvSmooth (ds_image_nonedge, ds_image_nonedge, CV_GAUSSIAN, 5);

    // perform gradient
    IplImage *ds_scratch = cvCreateImageHeader (cvGetSize(ds_image_nonedge), IPL_DEPTH_8U, 1);
    ds_scratch->imageData = scratch_image->imageData;
    cvMorphologyEx (ds_image_nonedge, ds_image_nonedge, ds_scratch, kernel, CV_MOP_GRADIENT);
    cvReleaseImageHeader (&ds_scratch);

    CvScalar mean, stdev;
    cvAvgSdv (ds_image_nonedge, &mean, &stdev);
    cvThreshold (ds_image_nonedge, ds_image_nonedge, mean.val[0]+2*stdev.val[0], 255, CV_THRESH_BINARY);
    cvErode (ds_image_nonedge, ds_image_nonedge, kernel);
    cvNot (ds_image_nonedge, ds_image_nonedge);

    // draw the bad pixels on the image so we can see them
    cvResize (ds_image_nonedge, scratch_image, CV_INTER_NN);
    for (int i = 0; i < scratch_image->height; i++) {
        unsigned char* srcPtr = (unsigned char*)(scratch_image->imageData + i*scratch_image->widthStep);
        unsigned char* dstPtr = (unsigned char*)(src->imageData + i*src->widthStep);

        for (int j = 0; j < scratch_image->width; j++) {        
            if (*srcPtr == 0) {
                dstPtr[0] = 0;
                dstPtr[1] = 0;
                dstPtr[2] = 200;
            }
            srcPtr ++;
            dstPtr += 3;
        }
    }
    */

    if (method & WATERSHED_SAMPLE_RANDOM) {
        // Since the edge detection above is disabled, mark the whole mask
        // as "non-edge" so every random coordinate is a candidate.
        cvSet (ds_image_nonedge, CV_RGB(1,1,1));
        // sample the image like this
        // 1. randomly generate an x,y coordinate.
        // 2. Check if the coordinate is a non-edge pixel on the nonedge image.
        // 3. If so add it to color_point_vector and
        // 4. If so mark coordinates near it as edge on the nonege image
        for (int i = 0; i < 200; i++) {
            int x = rand() % ds_image_nonedge->width;
            int y = rand() % ds_image_nonedge->height;

            unsigned char nonedge = *((unsigned char*)ds_image_nonedge->imageData + y*ds_image_nonedge->widthStep + x);
            if (nonedge != 0) {
                // calculate corresponding large image coords
                // NOTE(review): assumes ds_image_nonedge coordinates map to
                // src via WATERSHED_DS_FACTOR — confirm both images agree.
                int xl = x * WATERSHED_DS_FACTOR;
                int yl = y * WATERSHED_DS_FACTOR;
                // 3.
                unsigned char* colorPtr = (unsigned char*)src->imageData + yl*src->widthStep + 3*xl;
                COLOR_TRIPLE ct (colorPtr[0], colorPtr[1], colorPtr[2], 0);;
                color_point_vector.push_back(std::make_pair(ct, cvPoint(xl,yl)));
                // 4. Black out a radius-10 disc so later samples keep their
                // distance from this seed.
                cvCircle (ds_image_nonedge, cvPoint(x,y), 10, CV_RGB(0,0,0), -1);          
            }
        }
    }
    else {
        // Grid sampling: walk src in steps of `step` pixels, keeping a point
        // only when its color differs enough from the previously kept one.
        int step = 10;
        if (method & WATERSHED_STEP_SMALL) step = 5;

        COLOR_TRIPLE ct_prev;

        for (int y = step/2; y < src->height; y += step) {
            // Start at the first grid column (x == step/2) of row y.
            unsigned char* colorPtr = (unsigned char*)src->imageData + y*src->widthStep + 3*step/2;

            for (int x = step/2; x < src->width; x += step) {
                COLOR_TRIPLE ct (colorPtr[0], colorPtr[1], colorPtr[2], 0);
                
                // Keep only points that differ noticeably from the last kept
                // color; suppresses runs of near-identical seeds.
                if (ct.diff(ct_prev) >= 20) {
                    color_point_vector.push_back(std::make_pair(ct, cvPoint(x,y)));
                    ct_prev = ct;
                    //x += step;
                    //colorPtr += 3*step;
                }

                colorPtr += 3*step;
            }
        }
    }

    int diff_limit = 30;
    // the color point vector will have too many pixels that are really similar - get rid of some by merging    
    // Pairwise merge: seeds with similar color (< diff_limit) that are within
    // 100 px (dx*dx + dy*dy < 10000) are merged. A coin flip decides which of
    // the pair survives; indices are adjusted after each erase so the scan
    // stays valid.
    for (unsigned i = 0; i < color_point_vector.size(); i++) {
        for (unsigned j = i+1; j < color_point_vector.size(); j++) {
            int dx = color_point_vector[i].second.x - color_point_vector[j].second.x;
            int dy = color_point_vector[i].second.y - color_point_vector[j].second.y;  
            
            if (color_point_vector[i].first.diff(color_point_vector[j].first) < diff_limit && dx*dx + dy*dy < 10000)
            {
                if (rand() % 2 == 0) {
                    color_point_vector[i].first.merge(color_point_vector[j].first);
                    color_point_vector.erase(color_point_vector.begin()+j);
                    j--;
                }
                else {
                    color_point_vector[j].first.merge(color_point_vector[i].first);
                    color_point_vector.erase(color_point_vector.begin()+i);
                    i--;
                    break;    
                }
            }
        }
    }
}
示例#26
0
/* Warp the sample in `data` with a random perspective distortion and blend it
 * into `background`, alpha-weighted by the sample's (smoothed) mask.
 *
 * background     - destination image (any CvArr convertible via cvGetMat);
 *                  modified in place.
 * inverse        - if non-zero, invert the foreground intensities (XOR 0xFF).
 * maxintensitydev- max random intensity offset applied to foreground pixels.
 * maxxangle/maxyangle/maxzangle - bounds for the random rotation angles fed
 *                  to icvRandomQuad.
 * inscribe       - if non-zero, crop to the quad's circumscribing rectangle
 *                  instead of the raw source rectangle.
 * maxshiftf      - max random shift, as a fraction of the crop size.
 * maxscalef      - max random scale-up, as a fraction of the crop size.
 * data           - working buffers plus the source sample/mask and placement
 *                  offsets (dx, dy).
 *
 * NOTE(review): uses rand()/RAND_MAX for all randomness, so results depend on
 * the caller seeding the C PRNG.
 */
void icvPlaceDistortedSample( CvArr* background,
                              int inverse, int maxintensitydev,
                              double maxxangle, double maxyangle, double maxzangle,
                              int inscribe, double maxshiftf, double maxscalef,
                              CvSampleDistortionData* data )
{
    double quad[4][2];
    int r, c;
    uchar* pimg;
    uchar* pbg;
    uchar* palpha;
    uchar chartmp;
    int forecolordev;
    float scale;
    IplImage* img;
    IplImage* maskimg;
    CvMat  stub;
    CvMat* bgimg;

    CvRect cr;
    CvRect roi;

    double xshift, yshift, randscale;

    /* Build a random perspective quad for the source, then translate it to
     * the sample's placement offset (dx, dy). */
    icvRandomQuad( data->src->width, data->src->height, quad,
                   maxxangle, maxyangle, maxzangle );
    quad[0][0] += (double) data->dx;
    quad[0][1] += (double) data->dy;
    quad[1][0] += (double) data->dx;
    quad[1][1] += (double) data->dy;
    quad[2][0] += (double) data->dx;
    quad[2][1] += (double) data->dy;
    quad[3][0] += (double) data->dx;
    quad[3][1] += (double) data->dy;

    /* Clear working images: foreground to the background color, mask to 0. */
    cvSet( data->img, cvScalar( data->bgcolor ) );
    cvSet( data->maskimg, cvScalar( 0.0 ) );

    cvWarpPerspective( data->src, data->img, quad );
    cvWarpPerspective( data->mask, data->maskimg, quad );

    /* Soften the mask edges so the blend below feathers the boundary. */
    cvSmooth( data->maskimg, data->maskimg, CV_GAUSSIAN, 3, 3 );

    bgimg = cvGetMat( background, &stub );

    cr.x = data->dx;
    cr.y = data->dy;
    cr.width = data->src->width;
    cr.height = data->src->height;

    if( inscribe )
    {
        /* quad's circumscribing rectangle */
        cr.x = (int) MIN( quad[0][0], quad[3][0] );
        cr.y = (int) MIN( quad[0][1], quad[1][1] );
        cr.width  = (int) (MAX( quad[1][0], quad[2][0] ) + 0.5F ) - cr.x;
        cr.height = (int) (MAX( quad[2][1], quad[3][1] ) + 0.5F ) - cr.y;
    }

    /* Randomly shift the crop rectangle by up to maxshiftf of its size. */
    xshift = maxshiftf * rand() / RAND_MAX;
    yshift = maxshiftf * rand() / RAND_MAX;

    cr.x -= (int) ( xshift * cr.width  );
    cr.y -= (int) ( yshift * cr.height );
    cr.width  = (int) ((1.0 + maxshiftf) * cr.width );
    cr.height = (int) ((1.0 + maxshiftf) * cr.height);

    /* Randomly grow the crop rectangle (centered) by up to maxscalef. */
    randscale = maxscalef * rand() / RAND_MAX;
    cr.x -= (int) ( 0.5 * randscale * cr.width  );
    cr.y -= (int) ( 0.5 * randscale * cr.height );
    cr.width  = (int) ((1.0 + randscale) * cr.width );
    cr.height = (int) ((1.0 + randscale) * cr.height);

    /* Fit the crop into the background's aspect ratio: the ROI is the crop
     * expanded (and centered) so that resizing it to the background size
     * preserves scale in the larger dimension. */
    scale = MAX( ((float) cr.width) / bgimg->cols, ((float) cr.height) / bgimg->rows );

    roi.x = (int) (-0.5F * (scale * bgimg->cols - cr.width) + cr.x);
    roi.y = (int) (-0.5F * (scale * bgimg->rows - cr.height) + cr.y);
    roi.width  = (int) (scale * bgimg->cols);
    roi.height = (int) (scale * bgimg->rows);

    img = cvCreateImage( cvSize( bgimg->cols, bgimg->rows ), IPL_DEPTH_8U, 1 );
    maskimg = cvCreateImage( cvSize( bgimg->cols, bgimg->rows ), IPL_DEPTH_8U, 1 );

    /* Resample the warped foreground and mask ROIs to background size. */
    cvSetImageROI( data->img, roi );
    cvResize( data->img, img );
    cvResetImageROI( data->img );
    cvSetImageROI( data->maskimg, roi );
    cvResize( data->maskimg, maskimg );
    cvResetImageROI( data->maskimg );

    /* Random intensity offset in [-maxintensitydev, +maxintensitydev]. */
    forecolordev = (int) (maxintensitydev * (2.0 * rand() / RAND_MAX - 1.0));

    /* Per-pixel alpha blend of the (intensity-shifted, optionally inverted)
     * foreground over the background, using the smoothed mask as alpha. */
    for( r = 0; r < img->height; r++ )
    {
        for( c = 0; c < img->width; c++ )
        {
            pimg = (uchar*) img->imageData + r * img->widthStep + c;
            pbg = (uchar*) bgimg->data.ptr + r * bgimg->step + c;
            palpha = (uchar*) maskimg->imageData + r * maskimg->widthStep + c;
            chartmp = (uchar) MAX( 0, MIN( 255, forecolordev + (*pimg) ) );
            if( inverse )
            {
                chartmp ^= 0xFF;
            }
            *pbg = (uchar) (( chartmp*(*palpha )+(255 - (*palpha) )*(*pbg) ) / 255);
        }
    }

    cvReleaseImage( &img );
    cvReleaseImage( &maskimg );
}
示例#27
0
/* Basic handwritten-digit OCR demo: the user draws in a 128x128 window with
 * the mouse (handled by on_mouse elsewhere in this file) and drives the demo
 * with hot keys. Returns 0 on normal exit (ESC).
 *
 * Fix vs. previous version: removed the redundant `red=green=blue=0;` that
 * was immediately overwritten by the following line, and replaced the
 * repeated single-key `if` chain with a `switch` on the key code. */
int main( int argc, char** argv )
{
    printf( "Basic OCR (Handwritten Digit Recogniser)\n"
		"Hot keys: \n"
	"\tr - reset image\n"
	"\t+ - cursor radio ++\n"
	"\t- - cursor radio --\n"
	"\ts - Save image as out.png\n"
	"\tc - Classify image, the result in console\n"
        "\tESC - quit the program\n");

	// Initialize global drawing state: not drawing, cursor radius 10,
	// black ink, cursor parked at the origin.
	drawing = 0;
	r = 10;
	last_x = last_y = red = green = blue = 0;

	// Create the single-channel canvas and clear it to white.
	imagen = cvCreateImage(cvSize(128,128), IPL_DEPTH_8U, 1);
	cvSet(imagen, CV_RGB(255,255,255), NULL);
	// screenBuffer is what we actually display: canvas + cursor overlay.
	screenBuffer = cvCloneImage(imagen);

	// Create window, size it to the canvas, and hook up mouse drawing.
	cvNamedWindow( "Demo", 0 );
	cvResizeWindow("Demo", 128, 128);
	cvSetMouseCallback("Demo", &on_mouse, 0);

	// The classifier; its constructor prepares the model.
	basicOCR ocr;

	// Main loop: show the buffer and poll the keyboard every 10 ms.
	for(;;)
	{
		cvShowImage( "Demo", screenBuffer );
		int c = cvWaitKey(10);
		if( (char) c == 27 )	// ESC quits
			break;

		switch( (char) c )
		{
		case '+':	// grow the cursor radius
			r++;
			drawCursor(last_x, last_y);
			break;
		case '-':	// shrink the cursor radius, but never below 1
			if( r > 1 ){
				r--;
				drawCursor(last_x, last_y);
			}
			break;
		case 'r':	// reset the canvas to white
			cvSet(imagen, cvRealScalar(255), NULL);
			drawCursor(last_x, last_y);
			break;
		case 's':	// save the current canvas
			cvSaveImage("out.png", imagen);
			break;
		case 'c':	// classify the drawn digit; result goes to the console
			ocr.classify(imagen, 1);
			break;
		}
	}

	cvDestroyWindow("Demo");

	return 0;
}
示例#28
0
//-------------------------------------------------------------------------------------
void ofxCvFloatImage::set(float value){
	cvSet(cvImage, cvScalar(value));
    flagImageChanged();
}
//--------------------------------------------------------------------------------
void ofxCvColorImage::set(int valueR, int valueG, int valueB){
    cvSet(cvImage, cvScalar(valueR, valueG, valueB));
}
示例#30
0
// Process one camera frame: shift the previous gray frame into mImage1,
// convert the new YCbCr frame into mImage2, track features between the two
// with pyramidal Lucas-Kanade, and accumulate the average flow direction as
// a crude horizontal-speed estimate (logged and drawn for debugging).
void VisualOdometry::slotProcessImage(const QString& name, const QSize& imageSize, const QVector3D& position, const QQuaternion& orientation, const QByteArray imageData)
{
    mNumberOfFramesProcessed++;

    // first copy the current image into the previous image
    // NOTE(review): copies width*height bytes, ignoring IplImage::widthStep —
    // only correct if the rows are unpadded (widthStep == width); confirm.
    memcpy(mImage1->imageData, mImage2->imageData, mFrameSize.width * mFrameSize.height);

    // convert the YCbCr from the camera into the current gray image
    Camera::convertYCbCr422ToGray8((u_char*)imageData.constData(), (u_char*)mImage2->imageData, imageSize.width(), imageSize.height());

    // black out the top quater of the image because of propeller noise
    for(int i=0; i<mFrameSize.width*(mFrameSize.height/4); i++) mImage2->imageData[i]=0;

    // On the first execution, mImage1 is still undefined.
    if(mNumberOfFramesProcessed == 1) return;

    // reset debug image to black
    cvSet(mImageDebug, cvScalar(0));

    char filename[50];
    sprintf(filename, "image1_%d.jpg", mNumberOfFramesProcessed);
//    cvSaveImage(filename, mImage1);

    sprintf(filename, "image2_%d.jpg", mNumberOfFramesProcessed);
//    cvSaveImage(filename, mImage2);

//    qDebug() << "numberOfFeatures before is" << mNumberOfFeatures;
    // Detect corners in the previous frame. mNumberOfFeatures is in/out:
    // capacity in, actual count out.
    // NOTE(review): min_distance of .01 px effectively disables feature
    // spacing — confirm this is intentional.
    cvGoodFeaturesToTrack(mImage1, mImageEigen, mImageTemp, mFeaturesFrame1, &mNumberOfFeatures, .01, .01, NULL);
//                qDebug() << "numberOfFeatures after  is" << mNumberOfFeatures;


    /* Actually run Pyramidal Lucas Kanade Optical Flow!!
     * "frame1_1C" is the first frame with the known features.
     * "frame2_1C" is the second frame where we want to find the first frame's features.
     * "pyramid1" and "pyramid2" are workspace for the algorithm.
     * "frame1_features" are the features from the first frame.
     * "frame2_features" is the (outputted) locations of those features in the second frame.
     * "number_of_features" is the number of features in the frame1_features array.
     * "optical_flow_window" is the size of the window to use to avoid the aperture problem.
     * "5" is the maximum number of pyramids to use.  0 would be just one level.
     * "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
     * "optical_flow_feature_error" is as described above (error in the flow for this feature).
     * "mOpticalFlowTerminationCriteria" is as described above (how long the algorithm should look).
     * "0" means disable enhancements.  (For example, the second array isn't pre-initialized with guesses.)
     */
    cvCalcOpticalFlowPyrLK(
        mImage1,
        mImage2,
        mImagePyramid1,
        mImagePyramid2,
        mFeaturesFrame1,
        mFeaturesFrame2,
        mNumberOfFeatures,
        mOpticalFlowWindow,
        5,
        mOpticalFlowFoundFeature,
        mOpticalFlowFeatureError,
        mOpticalFlowTerminationCriteria,
        0);

    // Sum of per-feature displacement (frame1 - frame2) over all tracked
    // features; 64-bit to avoid overflow over many features.
    qint64 averageDirectionX = 0;
    qint64 averageDirectionY = 0;

    /* For fun (and debugging :)), let's draw the flow field. */
    for(int i = 0; i < mNumberOfFeatures; i++)
    {
        /* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
        if(mOpticalFlowFoundFeature[i] == 0) continue;

        CvPoint p1,p2;

        p1.x = (int) mFeaturesFrame1[i].x;
        p1.y = (int) mFeaturesFrame1[i].y;
        p2.x = (int) mFeaturesFrame2[i].x;
        p2.y = (int) mFeaturesFrame2[i].y;

        averageDirectionX += p1.x - p2.x;
        averageDirectionY += p1.y - p2.y;

//                        const float angle = atan2( (double) p1.y - p2.y, (double) p1.x - p2.x);
//                        const float hypotenuse = sqrt(SQUARE(p1.y - p2.y) + SQUARE(p1.x - p2.x));
        /* Here we lengthen the arrow by a factor of three. */
//                        p2.x = (int) (p1.x - 3 * hypotenuse * cos(angle));
//                        p2.y = (int) (p1.y - 3 * hypotenuse * sin(angle));

        /* Now we draw the main line of the arrow. */
        /* "mImage1" is the frame to draw on.
         * "p" is the point where the line begins.
         * "q" is the point where the line stops.
         * "CV_AA" means antialiased drawing.
         * "0" means no fractional bits in the center cooridinate or radius.
         */
        cvLine( mImage1, p1, p2, CV_RGB(255,255,255), 1, CV_AA, 0);

        /*
        // Now draw the tips of the arrow.  I do some scaling so that the tips look proportional to the main line of the arrow.
        p.x = (int) (q.x + 9 * cos(angle + pi / 4));
        p.y = (int) (q.y + 9 * sin(angle + pi / 4));
        cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        p.x = (int) (q.x + 9 * cos(angle - pi / 4));
        p.y = (int) (q.y + 9 * sin(angle - pi / 4));
        cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        */
    }

    // Draw a horizontal indicator line from the frame center whose length
    // reflects the averaged flow plus the previous speed estimate.
    // NOTE(review): the drawn value divides by 10 while the stored speed
    // divides by 12 — looks inconsistent; confirm which divisor is intended.
    CvPoint p1, p2;
    p1.x = mFrameSize.width/2;
    p1.y = mFrameSize.height/2;

    p2.x = p1.x + (averageDirectionX/10) + mLastAverageHorizontalSpeed;
    p2.y = p1.y;// + (averageDirectionY/5);

    mLastAverageHorizontalSpeed = averageDirectionX/12;

    // Log the raw summed flow for this frame.
    QTextStream out(mLogFile);
    out << "frame " << mNumberOfFramesProcessed << " " << averageDirectionX << " " << averageDirectionY << '\n';

    cvLine( mImage1, p1, p2, CV_RGB(255,255,255), 1, CV_AA, 0 );
    sprintf(filename, "frame_%d.jpg", mNumberOfFramesProcessed);
//                cvSaveImage(filename, mImage1);
    // NOTE(review): cv::imwrite takes a cv::InputArray — passing a raw
    // IplImage* here likely needs cv::cvarrToMat(mImage1); verify it compiles
    // against the OpenCV version in use.
    cv::imwrite(std::string(filename), mImage1);


    qDebug() << "AVG DIRECTION" << averageDirectionX << averageDirectionY;
}