Пример #1
0
/*
 * Segment the 8 characters of a license plate image.
 *
 * Pipeline: gray -> Otsu binarize -> invert -> resize to 400x200 ->
 * crop fixed borders -> find external contours -> crop one sub-image
 * per contour and measure its area/perimeter.
 *
 * orig_img: plate image (released by this function — the caller's
 *           pointer is invalid afterwards).
 * Returns a malloc'd array of 8 plateInfo entries (caller frees), or
 * NULL on allocation failure. Entries for characters that were not
 * found have charImage == NULL.
 */
plateInfo* processPlateChars( IplImage * orig_img )
{
	//Number of characters expected on a plate == length of returned array
	const int NUM_CHARS = 8;

	//Create placeholder for gray image
	IplImage * gray_img = cvCreateImage(cvGetSize(orig_img), IPL_DEPTH_8U, 1);

	//Convert image to grayscale
	cvCvtColor( orig_img, gray_img, CV_RGB2GRAY );

	//Create placeholder for black and white image
	IplImage * bw_img = cvCreateImage(cvGetSize(gray_img), IPL_DEPTH_8U, 1);

	//Convert gray image to binary; Otsu chooses the threshold, so the
	//128 below is only a formal parameter
	cvThreshold( gray_img, bw_img, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

	//Invert image so characters become white blobs for contour finding
	IplImage * rev_img = cvCreateImage(cvGetSize(bw_img), IPL_DEPTH_8U, 1);
	cvNot( bw_img, rev_img );

	//Resize the reversed image: 400x200 (435x218??)
	IplImage * resize_img = cvCreateImage(cvSize(400, 200), IPL_DEPTH_8U, 1);
	cvResize( rev_img, resize_img, CV_INTER_LINEAR );

	//Okay, now find the reversed (and resized) image's size
	CvSize resize_size = cvGetSize( resize_img );
	int w = resize_size.width;
	int h = resize_size.height;

	printf("Width: %d\nHeight: %d\n", w, h);

	//Crop the fixed borders off (30 px left/right, 54 px top/bottom)
	//Old data for below: cvSize( w, h-108 ) (w-30, h-108)
	IplImage * resize_crop = cvCreateImage(cvSize(w-60, h-108), IPL_DEPTH_8U, 1);

	//Old data for below: cvRect( 0, 54, w, h-108)  (15, 54 w-30, h-108)
	cvSetImageROI( resize_img, cvRect(30, 54, w-60, h-108) );
	cvCopy( resize_img, resize_crop, NULL );

	CvSize resizeCrop_size = cvGetSize( resize_crop );
	int w2 = resizeCrop_size.width;
	int h2 = resizeCrop_size.height;

	printf("\nWidth: %d\nHeight: %d\n", w2, h2);

	//Result array; cast for C++ compatibility, and check for failure
	struct plateInfo * plate_info =
		(struct plateInfo *) malloc( sizeof(plateInfo) * NUM_CHARS );

	CvSeq * cvSeq = 0;
	CvMemStorage * storage = cvCreateMemStorage(0);
	int numContours;
	int i;

	if ( plate_info != NULL )
	{
		//This should be 8 (one contour per character)
		numContours = cvFindContours( resize_crop, storage, &cvSeq,
				sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

		printf("\nnumContours plate: %d\n", numContours);

		cvDrawContours( resize_crop, cvSeq, cvScalarAll(255), cvScalarAll(0),
				1, -1, 8, cvPoint(0,0) );

		//Mark every slot empty first so a plate with fewer than NUM_CHARS
		//contours yields identifiable (NULL) entries instead of garbage
		for( i = 0; i < NUM_CHARS; i++)
		{
			plate_info[i].charImage = NULL;
			plate_info[i].contourArea = 0;
			plate_info[i].arcLength = 0;
		}

		//Walk the contour list; stop early if fewer than NUM_CHARS were
		//found (walking cvSeq->h_next past the end would crash)
		for( i = 0; i < NUM_CHARS && cvSeq != NULL; i++)
		{
			//Get bounding rect of this character's contour
			CvRect char_rect = cvBoundingRect( cvSeq, 0 );

			//Create and fill the individual character image
			plate_info[i].charImage = cvCreateImage(cvSize(char_rect.width, char_rect.height), IPL_DEPTH_8U, 1);

			//Set ROI for copying and copy
			cvSetImageROI( resize_crop, char_rect );
			cvCopy( resize_crop, plate_info[i].charImage, NULL );

			cvSeq = cvSeq->h_next;
		}

		//Fix area and perimeter prob: cvContourArea/cvArcLength on the
		//whole-plate contours gave wrong values, so re-find the single
		//contour inside each cropped character image and measure that
		for( i = 0; i < NUM_CHARS; i++)
		{
			if ( plate_info[i].charImage == NULL )
				continue;	//fewer contours than expected

			cvClearMemStorage(storage);

			cvFindContours( plate_info[i].charImage, storage,
					&cvSeq, sizeof(CvContour), CV_RETR_EXTERNAL,
					CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

			cvDrawContours( plate_info[i].charImage, cvSeq, cvScalarAll(255),
					cvScalarAll(0), 1, -1, 8, cvPoint(0,0) );

			if ( cvSeq != NULL )
			{
				plate_info[i].contourArea = cvContourArea( cvSeq, CV_WHOLE_SEQ, 0 );
				plate_info[i].arcLength = cvArcLength( cvSeq, CV_WHOLE_SEQ, 0 );
			}
		}
	}

	//NOTE(review): releasing orig_img here matches the original behavior,
	//but it leaves the caller's pointer dangling — verify callers expect it
	cvReleaseImage(&orig_img);
	cvReleaseImage(&gray_img);
	cvReleaseImage(&bw_img);
	cvReleaseImage(&rev_img);
	cvReleaseImage(&resize_img);
	cvReleaseImage(&resize_crop);
	cvReleaseMemStorage(&storage);

	return plate_info;
}
Пример #2
0
void CDataOut::R_Out()
{
  int ii=0;
  RFile<<"library(rgl) "<<endl;
  RFile<<"rgl.clear(\"all\")"<<endl;
  RFile<<"rgl.bg(sphere = TRUE, color = c(\"black\", \"green\"), lit = FALSE, size=2, alpha=0.2, back = \"lines\")"<<endl;
  RFile<<"rgl.light()"<<endl;
  RFile<<"rgl.bbox()"<<endl;
  CvMat *temp2,*temp3;
  temp2=cvCreateMat(3,6,CV_32FC1);
  temp3=cvCreateMat(3,3,CV_32FC1);
  	    CvMat* m = cvCreateMat(3,1,CV_32FC1);
  CvMat *temp,*jtemp;
  temp=cvCreateMatHeader(6,6,CV_32FC1);
  jtemp=cvCreateMat(3,6,CV_32FC1);
  for ( list<CElempunto*>::iterator It=pEstimator->pMap->bbdd.begin();
	It != pEstimator->pMap->bbdd.end(); It++ )
    {
      if((*It)->state>2)
	{
	  float s_th=sin((*It)->theta);
	  float c_th=cos((*It)->theta);
	  float s_ph=sin((*It)->phi);
	  float c_ph=cos((*It)->phi);

	  cvmSet(m,0,0,sin((*It)->phi));
	  cvmSet(m,1,0,-sin((*It)->theta));
	  cvmSet(m,2,0,cos((*It)->phi));
	  cvNormalize( m, m);

	  float xreal=(*It)->wx +cvmGet(m,0,0)/(*It)->rho;
	  float yreal=(*It)->wy +cvmGet(m,1,0)/(*It)->rho;
	  float zreal=(*It)->wz +cvmGet(m,2,0)/(*It)->rho;


	  cvZero(jtemp);
	  cvmSet(jtemp,0,0,1);
	  cvmSet(jtemp,1,1,1);
	  cvmSet(jtemp,2,2,1);

	  cvmSet(jtemp,0,3,(-s_th*s_ph)/(*It)->rho);
	  cvmSet(jtemp,1,3,(-c_th     )/(*It)->rho);
	  cvmSet(jtemp,2,3,(-s_th*c_ph)/(*It)->rho);

	  cvmSet(jtemp,0,4,(c_th*c_ph)/(*It)->rho);
	  cvmSet(jtemp,1,4,(0    )/(*It)->rho);
	  cvmSet(jtemp,2,4,(-c_th*s_ph)/(*It)->rho);

	  cvmSet(jtemp,0,5,(-c_th*s_ph)/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,1,5,(s_th     )/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,2,5,(-c_th*c_ph)/((*It)->rho*(*It)->rho));

	  if (12+ii*6< pEstimator->pCovMat->width && 12+ii*6< pEstimator->pCovMat->height)
	    {

	      cvGetSubRect( pEstimator->pCovMat,temp,cvRect(12+ii*6,12+ii*6,6,6) );
	      cvMatMul(jtemp,temp,temp2);
	      cvGEMM( temp2,jtemp,1,NULL,0,temp3,CV_GEMM_B_T );

	      RFile<<"p"<<ii<< " <- matrix(c(" ;
	      for (int i=0; i<2 ; i++)
		for (int j=0; j<3;j++)
		  {
		    RFile<<cvmGet(temp3,i,j)<<",";
		  }
	      RFile<<cvmGet(temp3,2,0)<<",";
	      RFile<<cvmGet(temp3,2,1)<<",";
	      RFile<<cvmGet(temp3,2,2);
	      RFile<<"),3,3)"<<endl;
	      RFile<<"pos <- c("<<xreal<<", ";
	      RFile<<yreal<<", ";
	      RFile<<zreal<<")"<<endl;
	      RFile<<"try(plot3d( ellipse3d(p"<<ii<<",centre=";
	      RFile<<"pos), col=\"blue\", alpha=0.5, add = TRUE) )"<<endl;
	    }
	}
      ii++;
    }
  RFile<<"p"<<ii<< " <- matrix(c(" ;
  for (int i=0; i<2 ; i++)
    for (int j=0; j<3;j++)
      {
	RFile<<cvmGet(pEstimator->pCovMat,2*i,2*j)<<",";
      }
   RFile<<cvmGet(pEstimator->pCovMat,4,0)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,2)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,4);
   RFile<<"),3,3)"<<endl;
   RFile<<"pos <- c("<< cvmGet(pEstimator->pDataCam->translation,0,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,1,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,2,0)<<") "<<endl;
   RFile<<"plot3d( ellipse3d(p"<<ii<<",centre=";
   RFile<<"pos), col=\"red\", alpha=0.5, add = TRUE) "<<endl;
   RFile<<"rgl.viewpoint(45,30)"<<endl;
   RFile<<"rgl.snapshot(\"c:\\\\out"<<iter<<".png\")"<<endl;
   //RFile.close();

   cvReleaseMat(&temp);
   cvReleaseMat(&temp2);
   cvReleaseMat(&temp3);
   cvReleaseMat(&jtemp);

}
int main(int argc, char* argv[]){
    IplImage *frame, *frameL, *frameR;

    //load image
    //images are loaded by openCV with color channels in this order: BGR
    frame = cvLoadImage(argv[1], 1);

    //Simple error handling
    if(!frame){
       printf("Erro ao abrir a imagem.");
       exit(-1);
    }

    //Verify if image have width divisible by 2
    if(frame->width % 2 != 0){
          printf("Imagem possui largura não divisível por 2. Favor cortar!");
          exit(-1);
    }

    //get width and height from original image
    int width = frame->width;
    int height = frame->height;


    //new images will have half width of original image
    CvSize size = cvSize( width/2, height);

    //copy image properties
    frameL = cvCreateImage(size, frame->depth, frame->nChannels);
    frameR = cvCreateImage(size, frame->depth, frame->nChannels);
    cvZero(frameL);
    cvZero(frameR);

    //set ROI to cut the image on the left (half of the original)
    cvSetImageROI(frame, cvRect(0,0, width/2, height));

    //copy image portion
    cvCopy(frame, frameL);

    //reset ROI
    cvResetImageROI(frame);

    //set ROI to cut the image on the right (second half of the original)
    cvSetImageROI(frame, cvRect(width/2, 0, width/2, height));

    //copy image portion
    cvCopy(frame, frameR);

    //reset ROI
    cvResetImageROI(frame);

    //save images
    cvSaveImage("frameLeft.bmp", frameL);
    cvSaveImage("frameRight.bmp", frameR);

    //create anaglyph
    createAnaglyph(frameL, frameR);

    //free pointers
    cvReleaseImage(&frame);
    cvReleaseImage(&frameL);
    cvReleaseImage(&frameR);

    printf("\n\nImagens separadas com sucesso!\n\n");

    return 0;
}
Пример #4
0
/*
 * Lane-detection pipeline for one video frame: crop to the lower half
 * (road region), grayscale, blur, Canny edges, probabilistic Hough
 * transform, then hand the detected lines to processLanes().
 *
 * Fix: removed the unused CvFont/cvInitFont pair — no live code in this
 * function referenced it.
 */
static void process_image_common(IplImage *frame)
{
  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  // Work on the bottom half of the frame only
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  // Reduce the image by 2
  // TODO(review): half_frame is never read afterwards — confirm whether
  // this cvPyrDown call can be removed along with the image.
  cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5);

  /* we're interested only in the road below the horizon - so crop the top image portion off */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // convert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smoothing image more strongly than the original program
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  // Release all per-frame scratch buffers
  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
  cvReleaseImage(&half_frame);
}
Пример #5
0
int main(int argc, char **argv)
{
    // Initialize ROS
    ros::init(argc, argv, "ic2020_vodom");
    ros::NodeHandle n;   

    ros::Subscriber surf_sub = n.subscribe("/surf/keyframes", 5, optflowCallback);
    ros::Publisher vodom_pub = n.advertise<ic2020_vodom::keyframe>("/vodom/keyframes", 100);    

    // Wait for video streams to be up
    BlockWhileWaitingForVideo();  

    // Create display images
	view_im = cvCreateImage( cvSize(2*newArrival->width, newArrival->height), 8, IPL_PXL_BYTES );

	#ifdef VISUALIZE
	cvNamedWindow("VisualOdom", CV_WINDOW_AUTOSIZE);
	#endif

    // Main loop
    printf("Entering main loop\n");

    while (ros::ok())
    {
        char c = cvWaitKey(5);
        if (c == 'Q' || c == 'q')
            break; 

        // Get Images
        ros::spinOnce();

        // Check if new keyframe is available
        if (newArrival == NULL) { continue; }
        printf ("\33[2J");
        
        // Rotate in new data
        RotateNewArrivalIn();
        
        /**********************************
            Check we have two keyframes
        ***********************************/
        if (kA == 0 || kB == 0) { continue; }
        
        printf("Keyframe A: %i\n", kA->keyframe_num);
        printf("Keyframe B: %i\n", kB->keyframe_num);
        
        // COPY IMAGE DATA TO DOUBLE SIZE IMAGE
        cvSetImageROI( view_im, cvRect(0, 0, kB->im->width, kB->im->height));
        cvCopy( kB->im, view_im );
        cvSetImageROI( view_im, cvRect(kB->im->width, 0, kA->im->width, kA->im->height));
        cvCopy( kA->im, view_im );
        cvResetImageROI( view_im );

        // DRAW RED CIRCLES ON FEATURES
        for (unsigned int i = 0; i < kB->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->features[i].point2D[0]),
                     cvRound(kB->features[i].point2D[1])), 3.0f, colors[0], 2, 8);
        }
        for (unsigned int i = 0; i < kA->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->features[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->features[i].point2D[1])), 3.0f, colors[0], 2, 8);              
        }
        for (unsigned int i = 0; i < kB->numCorn1; i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->corn1[i].point2D[0]),
                     cvRound(kB->corn1[i].point2D[1])), 3.0f, colors[1], 1, 8);
        }
        for (unsigned int i = 0; i < kA->numCorn2; i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->corn2[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->corn2[i].point2D[1])), 3.0f, colors[1], 1, 8);              
        }
        
        /**********************************
          Initial RANSAC w SURF and STCorn
        ***********************************/
        
        // GET SURF PAIRS
        tt = (double)cvGetTickCount();
        std::vector<unsigned int> pairs;
        SURFHelper::findSURFPairs(&kA->descBuffer, &kB->descBuffer, pairs);
        tt = (double)cvGetTickCount() - tt;
        //printf( "SURF Match Time = %gms\n", tt/(cvGetTickFrequency()*1000.));        
        printf( "Found %i SURF Matches \n", pairs.size()/2);
        
        // RANSAC
        std::vector<unsigned int> filtered_surf_pairs;
        std::vector<unsigned int> filtered_corn_pairs;
        tt = (double)cvGetTickCount();
        if (kA->numCorn2 == kB->numCorn1) {
            if (!VisualOdometry::RANSAC6DFast(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
                                          &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs,
                                          kB->im->width, kB->im->height, 10, 10, 1)) 
            //if (!VisualOdometry::RANSAC6D(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
            //                              &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs)) 
            //if (!VisualOdometry::RANSAC6DReproj(&kA->features, &kB->features, &pairs, &filtered_surf_pairs))
            {
                printf("RANSAC MATCHES FEATURE # AREN'T EQUAL OR LESS THAN 7 FEATURES \n");
                continue;
            }
        } else {
            printf("WTF KEYFRAME A's FORWARD ST FEATURES != KEYFRAME B's BACK ST FEATURES \n");
        }
        tt = (double)cvGetTickCount() - tt;
        printf( "RANSAC Time = %gms\n", tt/(cvGetTickFrequency()*1000.));        

        // Create index links from B to A
        std::vector<int> revReferenceA;
        for(unsigned int i = 0; i < (unsigned int)kA->features.size(); i++) { revReferenceA.push_back(-1); }
        for(unsigned int i = 0; i < (unsigned int)filtered_surf_pairs.size()/2; i++ )
        {
            int a = filtered_surf_pairs[2*i+0];
            int b = filtered_surf_pairs[2*i+1]; 
            kB->surfMatches[b] = a;
            revReferenceA[a] = b;
        } 
     
        // Remove Useless SURF Features
        std::vector<feature> tfeatures;
        std::vector<surfdesc> tdescBuffer;
        std::vector<int> tsurfMatches;
        for (unsigned int i = 0; i < kA->features.size(); i++) {
            if (revReferenceA[i] >= 0) { // is being matched in the next frame
                tfeatures.push_back(kA->features[i]);
                tdescBuffer.push_back(kA->descBuffer[i]);
                tsurfMatches.push_back(kA->surfMatches[i]);
                kB->surfMatches[revReferenceA[i]] = tfeatures.size() - 1;
            }
            else if (kA->surfMatches[i] >= 0) { // has a match in the previous frame
                tfeatures.push_back(kA->features[i]);
                tdescBuffer.push_back(kA->descBuffer[i]);
                tsurfMatches.push_back(kA->surfMatches[i]);
            }
        }
        kA->features = tfeatures;
        kA->descBuffer = tdescBuffer;
        kA->surfMatches = tsurfMatches;
     
        // CREATE VECTOR OF MATCHES
        std::vector<feature> matchesA2;
        std::vector<feature> matchesB2;
        // ADD IN SURF MATCHES
        for(unsigned int i = 0; i < (unsigned int)filtered_surf_pairs.size()/2; i++ )
        {
            //int a = filtered_surf_pairs[2*i+0];
            int b = filtered_surf_pairs[2*i+1]; 
            int a = kB->surfMatches[b];
            matchesA2.push_back(kA->features[a]);
            matchesB2.push_back(kB->features[b]);
        } 
     
        // ADD IN CORNER MATCHES
        for(unsigned int i = 0; i < kB->numCorn1; i++ )
        {
            if (filtered_corn_pairs[i] > 0) {
                matchesA2.push_back(kA->corn2[i]);
                matchesB2.push_back(kB->corn1[i]);
            } else {
                kB->status[i] = 0;
            }
        }
        
        // Print Green Circles Over RANSAC Points
        for (unsigned int i = 0; i < matchesA2.size(); i++) {
            float lx = matchesB2[i].point2D[0];
            float ly = matchesB2[i].point2D[1];
            float lx2 = matchesA2[i].point2D[0];
            float ly2 = matchesA2[i].point2D[1];
            cvCircle(view_im,cvPoint(cvRound(lx), cvRound(ly)), 3.0f, colors[3], 2, 8);
            cvCircle(view_im, cvPoint(cvRound(lx2) + kA->im->width, cvRound(ly2)), 3.0f, colors[3], 2, 8);  
            cvLine(view_im, cvPoint(cvRound(lx), cvRound(ly)), cvPoint(cvRound(lx2), cvRound(ly2)), colors[3] );
        }

        // Least Squares
        double rdata[9];
        double translation[3];
        
        VisualOdometry::ArunLeastSquares(&matchesA2, &matchesB2, rdata, translation);      
        
        for (unsigned int i = 0; i < 9; i++) { kB->rotation[i] = rdata[i]; }
        for (unsigned int i = 0; i < 3; i++) { kB->translation[i] = translation[i]; }
        
        /*********************
            ICP
        **********************/
        // Setup Images and Contours
        
	/*
        // Call ICP
        double rdata2[9];
        double translation2[3];
        VisualOdometry::ICPKeyframes(kA, kB, rdata, translation, rdata2, translation2);
        for (unsigned int i = 0; i < 9; i++) { kB->rotation[i] = rdata2[i]; }
        for (unsigned int i = 0; i < 3; i++) { kB->translation[i] = translation2[i];}
	*/
        /*********************
            Publish Frame
        **********************/        
        
        // Print Rotation and Translation
        double pitch = atan2(-rdata[6], sqrt(pow(rdata[0],2.0)+pow(rdata[3],2.0)));
        double yaw = atan2(rdata[3]/cos(pitch), rdata[0]/cos(pitch));
        double roll = atan2(rdata[7]/cos(pitch), rdata[8]/cos(pitch));
        printf("pit yaw rol: %f %f %f\n",pitch,yaw,roll);
        printf("translation: %f %f %f\n",translation[0],translation[1],translation[2]); 
        
        /*double pitch2 = atan2(-rdata2[6], sqrt(pow(rdata2[0],2.0)+pow(rdata2[3],2.0)));
        double yaw2 = atan2(rdata2[3]/cos(pitch), rdata2[0]/cos(pitch));
        double roll2 = atan2(rdata2[7]/cos(pitch), rdata2[8]/cos(pitch));
        printf("icp pit yaw rol: %f %f %f\n",pitch2,yaw2,roll2);
        printf("icp translation: %f %f %f\n",translation2[0],translation2[1],translation2[2]); */

        // Publish Point Clouds
        printf("publishing\n");
        kA->PublishKeyframe(&vodom_pub);
        printf("done publishing\n");

	#ifdef VISUALIZE
        // Show stereo image
        cvShowImage("VisualOdom", view_im);
	#endif
    }

    return 0;
}
Пример #6
0
/*
// Getting feature pyramid  
//
// API
// int getFeaturePyramid(IplImage * image,
//                       const int lambda, const int k,
//                       const int startX, const int startY,
//                       const int W, const int H, CvLSVMFeaturePyramid **maps);
// INPUT
// image             - image
// lambda            - resize scale
// k                 - size of cells
// startX            - X coordinate of the image rectangle to search
// startY            - Y coordinate of the image rectangle to search
// W                 - width of the image rectangle to search
// H                 - height of the image rectangle to search
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, CvLSVMFeaturePyramid **maps)
{
    IplImage *img2, *imgTmp, *imgResize;
    float   step, tmp;
    int      cntStep;
    int      maxcall;
    int i;
    int err;
    CvLSVMFeatureMap *map;
    
    //geting subimage: copy the (startX, startY, W, H) rectangle out of image
    cvSetImageROI(image, cvRect(startX, startY, W, H));
    img2 = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    cvCopy(image, img2, NULL);
    cvResetImageROI(image);

    // Work in 32-bit float; convert only if the input is not already 32F,
    // otherwise alias img2 (release logic below depends on this)
    if(img2->depth != IPL_DEPTH_32F)
    {
        imgResize = cvCreateImage(cvSize(img2->width , img2->height) , IPL_DEPTH_32F , 3);
        cvConvert(img2, imgResize);
    }
    else
    {
        imgResize = img2;
    }
    
    // Scale factor between pyramid levels: 2^(1/lambda)
    step = powf(2.0f, 1.0f/ ((float)lambda));
    // maxcall = smallest image dimension measured in cells of size k
    maxcall = W/k;
    if( maxcall > H/k )
    {
        maxcall = H/k;
    }
    // Number of octave levels until the smaller side shrinks to ~5 cells
    cntStep = (int)(logf((float)maxcall/(5.0f))/logf(step)) + 1;
    //printf("Count step: %f %d\n", step, cntStep);

    allocFeaturePyramidObject(maps, lambda, cntStep + lambda);

    // First lambda levels: fine resolution, cell size 4
    for(i = 0; i < lambda; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(img2, tmp, 4);
        // NOTE(review): err from the three calls below is never checked —
        // confirm whether failures here should abort the pyramid build
        err = getFeatureMaps_dp(imgTmp, 4, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }

    /**********************************one**************/
    // Remaining cntStep levels: same scales but cell size 8
    for(i = 0; i <  cntStep; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(imgResize, tmp, 8);
	    err = getFeatureMaps_dp(imgTmp, 8, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i + lambda] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
		cvReleaseImage(&imgTmp);
    }/*for(i = 0; i < cntStep; i++)*/

    // imgResize is a separate image only when a conversion happened;
    // otherwise it aliases img2, which is released unconditionally below
    if(img2->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    cvReleaseImage(&img2);
    return LATENT_SVM_OK;
}
Пример #7
0
/*
 * Interactive image-processing step: reads the latest key press from
 * userdata to toggle processing modes (grayscale/HSV channel selection,
 * Otsu threshold, smoothing, morphological close, Canny, contour fill,
 * histogram overlay, save-to-file), applies the selected pipeline to
 * userdata->input[0], and writes the result to userdata->output[0].
 * Returns 0 always.
 */
char operateImage(Userdata *userdata) {
	if (!userdata) {
		return 0;
	}

	IplImage *image1 = userdata->input[0];
	IplImage *image2 = userdata->input[1];
	IplImage *imageOut = userdata->output[0];
	IplImage *imageOut2 = userdata->output[1];

	// Mode flags persist across calls; toggled by key presses below
	static int color_mode = 4;
	static int smooth_mode = 0;
	static int otsu_mode = 0;
	static int close_mode = 0;
	static int canny_mode = 0;
	static int contour_mode = 0;
	static int hsv_mode = 0;
	static int save_img = 0;
	static int history_mode = 0;

	int key = userdata->key;
	switch (key) {
	case 'g':
		// cycle through gray/hue/saturation/brightness/color
		color_mode++;
		color_mode %= 5;
		break;
	case 's':
		smooth_mode = !smooth_mode;
		break;
	case 'o':
		otsu_mode = !otsu_mode;
		break;
	case 'e':
		close_mode = !close_mode;
		break;
	case 'c':
		canny_mode = !canny_mode;
		break;
	case 'b':
		contour_mode = !contour_mode;
		break;
	case 'h':
		hsv_mode = !hsv_mode;
		break;
	case 'H':
		history_mode = !history_mode;
		break;
	case 'S':
		save_img = 1;
		break;
	default:
		//cout << key << "\n";
		break;
	}

	int value = userdata->value;
	// kernel size must be odd for the morphology/smoothing calls
	int kernelSize = userdata->kernelSize;
	kernelSize += 1 - (kernelSize % 2);
	int lowThreshold = userdata->lowThreshold;
	int highThreshold = userdata->highThreshold;
	CvScalar minScalar = cvScalar(userdata->minScalar0, userdata->minScalar1, userdata->minScalar2);
	CvScalar maxScalar = cvScalar(userdata->maxScalar0, userdata->maxScalar1, userdata->maxScalar2);

	// Scratch buffers allocated once (static) and deliberately never
	// released — sized from the first frame; assumes frame size is constant
	static IplImage *tmp1d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
	static IplImage *tmp3d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);
	static IplImage *tmp3d2 = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	static IplImage *backgroundAcc = cvCreateImage(cvGetSize(image1), IPL_DEPTH_32F, 3);
	static IplImage *background = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	COND_PRINTF("                                                                                                 \r");

	// color_mode 0-3 reduce the frame to one channel in tmp1d;
	// mode 4 keeps all 3 channels and takes the other branch below
	char img_full_channel = 0;
	switch (color_mode) {
	case 0:
		COND_PRINTF("Gray");
		cvCvtColor(image1, tmp1d, CV_BGR2GRAY);
		break;
	case 1: // Hue mode
		COND_PRINTF("Hue");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, tmp1d, NULL, NULL, NULL);
		break;
	case 2: // Saturation mode
		COND_PRINTF("Saturation");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, tmp1d, NULL, NULL);
		break;
	case 3: // Brightness mode
		COND_PRINTF("Brightness");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, NULL, tmp1d, NULL);
		break;
	case 4: // 
		COND_PRINTF("Color");
		img_full_channel = 1;
		break;
	}

	//filterByVolume(tmp1d, tmp1d, value);
	if (img_full_channel) { // Image has 3 channel
#if 0
		cvRunningAvg(image1, backgroundAcc, (double)userdata->accValue / 1024, NULL);
		cvConvertScale(backgroundAcc, background, 1, 0);
		cvNamedWindow(CONTROL_WINDOW "41", 0);
		cvResizeWindow(CONTROL_WINDOW "41", 640 / 2, 480 / 2);
		cvShowImage(CONTROL_WINDOW "41", background);
		cvCreateTrackbar("accValue", CONTROL_WINDOW "41", &(userdata->accValue), 1024, trackbarCallback);

#endif
		// NOTE(review): this tmp3d result is overwritten by both branches
		// below — confirm whether filterByHSV is still needed here
		filterByHSV(image1, minScalar, maxScalar, tmp3d);
		if (history_mode) {
			cvCopy(image1, tmp3d, NULL);
			cvCopy(image1, tmp3d2, NULL);
			//cvCvtColor(image1, tmp3d, CV_BGR2HSV);

			// Draw a histogram of the user-selected square region onto tmp3d2
			//CvRect rect = cvRect(userdata->size.width * 3 / 4 - 40, userdata->size.height / 2 - 40, 80, 80);
			//CvRect rect = cvRect(userdata->size.width * 1 / 4 - 40, userdata->size.height / 2 - 40, userdata->size.width * 3 / 4, 80);
			CvRect rect = cvRect(userdata->square.origin.x, userdata->square.origin.y, userdata->square.size.width, userdata->square.size.height);
			cvSetImageROI(tmp3d, rect);
			GraficarHistograma(tmp3d, tmp3d2);
			cvResetImageROI(tmp3d);

			cvCopy(tmp3d2, tmp3d, NULL);
		}
		else {
			cvCopy(image1, tmp3d, NULL);
		}
	}
	else { // Image has 1 channel

		cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);

		if (otsu_mode) { // Apply Otsu's method
			COND_PRINTF(", Otsu");
			cvThreshold(tmp1d, tmp1d, 0, 255, CV_THRESH_OTSU);
		}

		if (smooth_mode) { // Apply Gaussian smoothing
			COND_PRINTF(", Gauss");
			cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);
		}

		if (close_mode) { // Morphological close: erode then dilate
			COND_PRINTF(", closE");
			int n = kernelSize;
			cvErode(tmp1d, tmp1d, NULL, n);
			cvDilate(tmp1d, tmp1d, NULL, n);
		}

		if (canny_mode) { // Apply Canny's method
			COND_PRINTF(", Canny");
			cvCanny(tmp1d, tmp1d, lowThreshold, highThreshold, 3);
			cvDilate(tmp1d, tmp1d, NULL, 1);
			cvErode(tmp1d, tmp1d, NULL, 1);
		}

		// Replicate the single channel into a displayable 3-channel image
		cvMerge(tmp1d, tmp1d, tmp1d, NULL, tmp3d);

		if (contour_mode) { // Fill each contour with a distinct color
			COND_PRINTF(", contours(b)");
			CvMemStorage *storage = cvCreateMemStorage(0);
			CvSeq *contours = NULL;
			int n = cvFindContours(tmp1d, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));
			//COND_PRINTF(", (" << n <<","<< contours->total <<")contours");
			// NOTE(review): storage is not released here — leaks per call
			// while contour_mode is on; verify against cvReleaseMemStorage
			for (int i = 0; contours != NULL; contours = contours->h_next, i++) {
				int cc = (int)((float)(255 * i) / contours->total);
				CvScalar colorpx = CV_RGB((cc) % 256, (cc + 256 / 3) % 256, (cc + 256 * 2 / 3) % 256);
				cvDrawContours(tmp3d, contours, colorpx, CV_RGB(0, 0, 0), -1, CV_FILLED, 8, cvPoint(0, 0));
			}
		}

	}

	COND_PRINTF("\r");

	// Shift history: input/output frame N-1 buffers, then publish tmp3d
	cvCopy(image1, image2, NULL);
	cvCopy(imageOut, imageOut2, NULL);
	cvCopy(tmp3d, imageOut, NULL);

	//cvReleaseImage(&tmp1d);
	//cvReleaseImage(&tmp3d);
	//cvReleaseImage(&tmp3d2);

	afterProcess(userdata);

	if (save_img) {
		save_img = 0;
		cvSaveImage(RESOURCES "output.png", imageOut);
	}

	return 0;
}
Пример #8
0
/*
 * Match a patch against a one-way-descriptor database using a FLANN
 * index over PCA-projected patches.
 *
 * The patch is resized to patch_size if needed, normalized by its pixel
 * sum, projected onto the PCA basis (avg, eigenvectors), truncated to
 * m_pca_dim_low coefficients, and looked up in m_pca_descriptors_tree.
 * The flat nearest-neighbor index is decoded into a descriptor index
 * (desc_idx = idx / m_pose_count) and a pose index (idx % m_pose_count);
 * distance receives the FLANN distance of the best match.
 */
void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
    CvMat* avg, CvMat* eigenvectors)
{
    // Defaults in case no neighbor is found
    desc_idx = -1;
    pose_idx = -1;
    distance = 1e10;
//--------
	//PCA_coeffs precalculating
	CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
	int patch_width = patch_size.width;
	int patch_height = patch_size.height;
	//if (avg)
	//{
		// Bring the patch to the canonical patch_size (resize only if
		// its ROI differs)
		CvRect _roi = cvGetImageROI((IplImage*)patch);
		IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
		if(_roi.width != patch_width|| _roi.height != patch_height)
		{

			cvResize(patch, test_img);
			_roi = cvGetImageROI(test_img);
		}
		else
		{
			cvCopy(patch,test_img);
		}
		// Normalize by total intensity (brightness invariance)
		// NOTE(review): divides by the pixel sum — an all-black patch
		// (sum == 0) would produce inf/NaN; confirm callers exclude this
		IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
		float sum = cvSum(test_img).val[0];
		cvConvertScale(test_img, patch_32f, 1.0f/sum);

		//ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
		//Projecting PCA: full projection, then keep the first
		//m_pca_dim_low coefficients
		CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
		CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
		cvProjectPCA(patch_mat, avg, eigenvectors, temp);
		CvMat temp1;
		cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
		cvCopy(&temp1, pca_coeffs);
		cvReleaseMat(&temp);
		cvReleaseMat(&patch_mat);
		//End of projecting

		cvReleaseImage(&patch_32f);
		cvReleaseImage(&test_img);
//	}

//--------

		//float* target = new float[m_pca_dim_low];
		//::flann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
		//::flann::SearchParams params;
		//params.checks = -1;

		//int maxDepth = 1000000;
		//int neighbors_count = 1;
		//int* neighborsIdx = new int[neighbors_count];
		//float* distances = new float[neighbors_count];
		//if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
		//{
		//	desc_idx = neighborsIdx[0] / m_pose_count;
		//	pose_idx = neighborsIdx[0] % m_pose_count;
		//	distance = distances[0];
		//}
		//delete[] neighborsIdx;
		//delete[] distances;

		// 1-NN search over the PCA coefficients via cv::flann
		cv::Mat m_object(1, m_pca_dim_low, CV_32F);
		cv::Mat m_indices(1, 1, CV_32S);
		cv::Mat m_dists(1, 1, CV_32F);

		float* object_ptr = m_object.ptr<float>(0);
		for (int i=0;i<m_pca_dim_low;i++)
		{
			object_ptr[i] = pca_coeffs->data.fl[i];
		}

		// checks = -1: exhaustive search of the index
		m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );

		// Decode flat index into (descriptor, pose)
		desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
		pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
		distance = ((float*)(m_dists.ptr<float>(0)))[0];

	//	delete[] target;


//    for(int i = 0; i < desc_count; i++)
//    {
//        int _pose_idx = -1;
//        float _distance = 0;
//
//#if 0
//        descriptors[i].EstimatePose(patch, _pose_idx, _distance);
//#else
//		if (!avg)
//		{
//			descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
//		}
//		else
//		{
//			descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
//		}
//#endif
//
//        if(_distance < distance)
//        {
//            desc_idx = i;
//            pose_idx = _pose_idx;
//            distance = _distance;
//        }
//    }
	cvReleaseMat(&pca_coeffs);
}
Пример #9
0
//**
/**
 * @brief Finds the n best (descriptor, pose) matches for an image patch
 *        among a set of one-way descriptors.
 *
 * When @p avg is non-NULL the patch is normalized and projected into the
 * PCA subspace once, and matching is done on the PCA coefficients;
 * otherwise each descriptor matches against the raw patch.
 *
 * @param desc_count   Number of descriptors in @p descriptors.
 * @param descriptors  Array of trained one-way descriptors.
 * @param patch        Input image patch (its ROI is used).
 * @param n            Number of best matches to return; desc_idxs, pose_idxs
 *                     and distances must each hold at least n elements.
 * @param desc_idxs    Out: indices of the n best descriptors (-1 = empty slot).
 * @param pose_idxs    Out: pose index within each matched descriptor.
 * @param distances    Out: match distances in ascending order (1e10 = empty).
 * @param avg          PCA mean, or NULL to match without PCA projection.
 * @param eigenvectors PCA eigenvector matrix (used when avg is non-NULL).
 */
void FindOneWayDescriptor(int desc_count, const CvOneWayDescriptor* descriptors, IplImage* patch, int n,
            std::vector<int>& desc_idxs, std::vector<int>&  pose_idxs, std::vector<float>& distances,
			CvMat* avg, CvMat* eigenvectors)
{
	// Initialize all n result slots to "no match".
	for (int i=0;i<n;i++)
	{
		desc_idxs[i] = -1;
		pose_idxs[i] = -1;
		distances[i] = 1e10;
	}
	//--------
	//PCA_coeffs precalculating
	int m_pca_dim_low = descriptors[0].GetPCADimLow();
	CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
	int patch_width = descriptors[0].GetPatchSize().width;
	int patch_height = descriptors[0].GetPatchSize().height;
	if (avg)
	{
		CvRect _roi = cvGetImageROI((IplImage*)patch);
		IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
		if(_roi.width != patch_width|| _roi.height != patch_height)
		{
			// Patch ROI differs from the trained patch size: rescale first.
			cvResize(patch, test_img);
			_roi = cvGetImageROI(test_img);
		}
		else
		{
			cvCopy(patch,test_img);
		}
		IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
		// Normalize the patch so its intensities sum to 1.
		// NOTE(review): an all-zero patch would make sum == 0 and the scale
		// 1/sum infinite -- presumably patches are never fully black; confirm.
		float sum = cvSum(test_img).val[0];
		cvConvertScale(test_img, patch_32f, 1.0f/sum);

		//ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
		//Projecting PCA
		CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
		CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
		cvProjectPCA(patch_mat, avg, eigenvectors, temp);
		// Keep only the first m_pca_dim_low projection coefficients.
		CvMat temp1;
		cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
		cvCopy(&temp1, pca_coeffs);
		cvReleaseMat(&temp);
		cvReleaseMat(&patch_mat);
		//End of projecting

		cvReleaseImage(&patch_32f);
		cvReleaseImage(&test_img);
	}
	//--------


	// Estimate the best pose per descriptor and keep the n smallest
	// distances in a sorted list (insertion sort into fixed-size arrays).
	for(int i = 0; i < desc_count; i++)
	{
		int _pose_idx = -1;
		float _distance = 0;

#if 0
		descriptors[i].EstimatePose(patch, _pose_idx, _distance);
#else
		if (!avg)
		{
			descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
		}
		else
		{
			descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
		}
#endif

		// Find the first slot with a worse distance, shift the tail down by
		// one, and insert the new match there.
		for (int j=0;j<n;j++)
		{
			if(_distance < distances[j])
			{
				for (int k=(n-1);k > j;k--)
				{
					desc_idxs[k] = desc_idxs[k-1];
					pose_idxs[k] = pose_idxs[k-1];
					distances[k] = distances[k-1];
				}
				desc_idxs[j] = i;
				pose_idxs[j] = _pose_idx;
				distances[j] = _distance;
				break;
			}
		}
	}
	cvReleaseMat(&pca_coeffs);
}
Пример #10
0
/**
 * @brief Lets the user select objects to track, either from a region file or
 *        interactively with the mouse.
 * @param regions Out: receives the rectangles that define each object
 * @param MAX_OBJECTS Maximum allowed number of objects to track; when > 0 the
 *        regions are read from a default file derived from the video name
 * @param argv The video file name is used to build the region file name
 * @return The number of objects selected by the user (<= MAX_OBJECTS)
 */
int get_regions(CvRect **regions, int MAX_OBJECTS, char *argv ) {
	
	FILE *fich;
	char name[50], *p1, *p2;
	params p;
	CvRect* r;
	int x1, y1, x2, y2, w, h;
	
	// If the regions must be read from a file...
	if(MAX_OBJECTS > 0) {
		p.n = MAX_OBJECTS;

		// Build "<REGION_IN><video-basename>txt" (basename without the dot);
		// if that file does not exist, retry with the dot kept, i.e.
		// "<REGION_IN><video-basename>.txt".
		strcpy( name, REGION_IN);
		p1 = strrchr( &argv[1], '/' );
		p2 = strrchr( &argv[1], '.' );
		strncat( name, (++p1), p2-p1 );
		strcat( name, "txt" );
		fich = fopen( name, "r" );
		if( ! fich ) {
			strcpy( name, REGION_IN);
			p1 = strrchr( &argv[1], '/' );
			p2 = strrchr( &argv[1], '.' );
			strncat( name, (++p1), (++p2)-p1 );
			strcat( name, "txt" );
			fich = fopen( name, "r" );
			if( ! fich ) {
				printf("Error leyendo las regiones iniciales\n");
				exit (-1);
			}
		}

		// Read two corner points per object: (loc1.x loc1.y loc2.x loc2.y).
		p.loc1 = std::vector<CvPoint>(MAX_OBJECTS);
		p.loc2 = std::vector<CvPoint>(MAX_OBJECTS);
		for( int i = 0; i < MAX_OBJECTS; ++i ) {
			int leidos = fscanf(fich, "%d", &p.loc1[i].x);
			leidos = fscanf(fich, "%d", &p.loc1[i].y);
			leidos = fscanf(fich, "%d", &p.loc2[i].x);
			leidos = fscanf(fich, "%d", &p.loc2[i].y);
		}
		fclose( fich );
	}

	// Otherwise, select the regions with the mouse...
	else {
		fprintf( stderr, "Selecciona la región a rastrear\n" );
		p.n = 0;
		cvNamedWindow( win_name, CV_WINDOW_AUTOSIZE );
		cvShowImage( win_name, first_frame );
		cvSetMouseCallback( win_name, &mouse, &p );
		cvWaitKey( 0 );
		cvDestroyWindow( win_name );
		if( p.n == 0 )
			return 0;
	}
	
	// Allocate the output list of regions
	r = (CvRect*) malloc( p.n * sizeof( CvRect ) );

	// Normalize each pair of corner points into an (x, y, w, h) rectangle.
	for( int i = 0; i < p.n; ++i ) {
		x1 = MIN( p.loc1[i].x, p.loc2[i].x );
		x2 = MAX( p.loc1[i].x, p.loc2[i].x );
		y1 = MIN( p.loc1[i].y, p.loc2[i].y );
		y2 = MAX( p.loc1[i].y, p.loc2[i].y );
		w = x2 - x1;
		h = y2 - y1;
		
		//printf("%d %d %d %d\n", x1, y1, x2, y2);
		// NOTE(review): the original comment claimed width/height are forced
		// to be EVEN, but this expression actually forces them to be ODD
		// (even values get +1) -- confirm which is intended.
		w = ( w % 2 )? w : w+1;
		h = ( h % 2 )? h : h+1;
		r[i] = cvRect( x1, y1, w, h );
	}
	*regions = r;
	return p.n;
}
Пример #11
0
int main(int argc, char * argv[])
{
	if(argc < 2)
	{
		showUsage();
	}

	bool inv = false;
	for(int i=1; i<argc-1; ++i)
	{
		if(strcmp(argv[i], "-inv") == 0)
		{
			inv = true;
			printf(" Inversing option activated...\n");
			continue;
		}
		showUsage();
		printf(" Not recognized option: \"%s\"\n", argv[i]);
	}

	std::string path = argv[argc-1];
	printf(" Path = %s\n", path.c_str());

	UDirectory dir(path, "jpg bmp png tiff jpeg");
	if(!dir.isValid())
	{
		printf("Path invalid!\n");
		exit(-1);
	}

	std::string targetDirectory = path+"_joined";
	UDirectory::makeDir(targetDirectory);
	printf(" Creating directory \"%s\"\n", targetDirectory.c_str());


	std::string fileNameA = dir.getNextFilePath();
	std::string fileNameB = dir.getNextFilePath();

	int i=1;
	while(!fileNameA.empty() && !fileNameB.empty())
	{
		if(inv)
		{
			std::string tmp = fileNameA;
			fileNameA = fileNameB;
			fileNameB = tmp;
		}

		std::string ext = UFile::getExtension(fileNameA);

		std::string targetFilePath = targetDirectory+UDirectory::separator()+uNumber2Str(i++)+"."+ext;

		IplImage * imageA = cvLoadImage(fileNameA.c_str(), CV_LOAD_IMAGE_COLOR);
		IplImage * imageB = cvLoadImage(fileNameB.c_str(), CV_LOAD_IMAGE_COLOR);

		fileNameA.clear();
		fileNameB.clear();

		if(imageA && imageB)
		{
			CvSize sizeA = cvGetSize(imageA);
			CvSize sizeB = cvGetSize(imageB);
			CvSize targetSize = {0};
			targetSize.width = sizeA.width + sizeB.width;
			targetSize.height = sizeA.height > sizeB.height ? sizeA.height : sizeB.height;
			IplImage* targetImage = cvCreateImage(targetSize, imageA->depth, imageA->nChannels);
			if(targetImage)
			{
				cvSetImageROI( targetImage, cvRect( 0, 0, sizeA.width, sizeA.height ) );
				cvCopy( imageA, targetImage );
				cvSetImageROI( targetImage, cvRect( sizeA.width, 0, sizeB.width, sizeB.height ) );
				cvCopy( imageB, targetImage );
				cvResetImageROI( targetImage );

				if(!cvSaveImage(targetFilePath.c_str(), targetImage))
				{
					printf("Error : saving to \"%s\" goes wrong...\n", targetFilePath.c_str());
				}
				else
				{
					printf("Saved \"%s\" \n", targetFilePath.c_str());
				}

				cvReleaseImage(&targetImage);

				fileNameA = dir.getNextFilePath();
				fileNameB = dir.getNextFilePath();
			}
			else
			{
				printf("Error : can't allocated the target image with size (%d,%d)\n", targetSize.width, targetSize.height);
			}
		}
		else
		{
			printf("Error: loading images failed!\n");
		}

		if(imageA)
		{
			cvReleaseImage(&imageA);
		}
		if(imageB)
		{
			cvReleaseImage(&imageB);
		}
	}
	printf("%d files processed\n", i-1);

	return 0;
}
Пример #12
0
{
    CvSize size;
    int t;
    uchar cbBounds[3];
    uchar modMin[3];
    uchar modMax[3];
    CvBGCodeBookElem** cbmap;
    CvMemStorage* storage;
    CvBGCodeBookElem* freeList;
} CvBGCodeBookModel;

/* Allocates an empty codebook background-subtraction model. */
CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel();
/* Releases the model and all of its per-pixel codebook entries. */
CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );

/* Learns the background: updates the per-pixel codebooks from image.
 * An empty roi (all zeros) means the whole image; mask optionally limits
 * which pixels are updated. */
CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
                                CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                const CvArr* mask CV_DEFAULT(0) );

/* Segments foreground: writes the foreground mask for image into fgmask and
 * returns the number of foreground pixels. */
CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
                             CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );

/* Removes codebook entries not accessed for more than staleThresh updates. */
CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
                                    CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                    const CvArr* mask CV_DEFAULT(0) );

/* Cleans up a raw foreground mask into connected components and returns the
 * sequence of their contours (polygonal when poly1Hull0 != 0). */
CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
                               float perimScale CV_DEFAULT(4.f),
                               CvMemStorage* storage CV_DEFAULT(0),
                               CvPoint offset CV_DEFAULT(cvPoint(0,0)));

#ifdef __cplusplus
/**
 * @brief Finds blobs by clustering connected components of a foreground mask.
 *
 * Extracts external contours from the (binarized) mask, partitions them into
 * clusters with CompareContour, unites the bounding rectangles of each
 * cluster, and derives one blob per cluster from the image moments of the
 * mask inside the united rectangle.
 *
 * @param pFG     Foreground mask; binarized internally at threshold 128.
 * @param pBlobs  Out: detected blobs are appended here.
 * @param storage Memory storage used for the contour and cluster sequences.
 */
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{   /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage );
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    pIB = cvCloneImage(pFG);
    cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
    cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list.      */
    /* Process each contour: */
    for(; cnt; cnt=cnt->h_next)
    {
        cvSeqPush( cnt_list, &cnt);
    }

    /* Partition the contours into clusters using the CompareContour
     * equivalence predicate; claster_num is the number of clusters. */
    claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL );

    for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00,X,Y,XX,YY; /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1,-1,-1,-1);
        CvMat       mat;

        /* Unite the bounding rects of every contour in this cluster: */
        for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;
            int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
            if(k!=claster_cur) continue;
            cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height<0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0,x1,y0,y1;
                x0 = MIN(rect_res.x,rect.x);
                y0 = MIN(rect_res.y,rect.y);
                x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width);
                y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1-x0;
                rect_res.height = y1-y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {
            X = 0;
            Y = 0;
            XX = 0;
            YY = 0;
        }
        else
        {
            /* Centroid (X, Y) and per-axis variance (XX, YY) from the
             * spatial moments of the mask inside the united rectangle: */
            cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 );
            M00 = cvGetSpatialMoment( &m, 0, 0 );
            if(M00 <= 0 ) continue;
            X = cvGetSpatialMoment( &m, 1, 0 )/M00;
            Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
            XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
            YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
        }
        /* Blob size is four standard deviations along each axis. */
        NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);

    }   /* Next cluster. */

    #if 0
    {   // Debug info:
        IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3);
        cvZero(pI);
        for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand()%256,rand()%256,rand()%256);

            for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
            {
                CvSeq*  cnt;
                int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
                if(k!=claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
                cvDrawContours( pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
            cvEllipse( pI,
                cvPointFrom32f(CV_BLOB_CENTER(pB)),
                cvSize(MAX(1,x), MAX(1,y)),
                0, 0, 360,
                color, 1 );
        }

        cvNamedWindow( "Clusters", 0);
        cvShowImage( "Clusters",pI );

        cvReleaseImage(&pI);

    }   /* Debug info. */
    #endif

}   /* cvFindBlobsByCCClasters */
/**
 * @brief Detects whether a new blob has appeared in the foreground mask.
 *
 * Maintains a sliding window of SEQ_SIZE per-frame blob lists and a set of
 * candidate tracks.  Each frame: new blobs are found (by contour clustering
 * or per-contour), filtered (too small, or intersecting an already tracked
 * blob), matched against candidate tracks, and each full-length track is
 * scored against a uniform-motion model.  The blob of the best-scoring track
 * is reported as a new blob.
 *
 * @param pImg         Original image (unused unless USE_OBJECT_DETECTOR).
 * @param pFGMask      Foreground mask for the current frame.
 * @param pNewBlobList Out: receives the newly detected blob(s).
 * @param pOldBlobList Blobs already being tracked (may be NULL).
 * @return 1 if a new blob was detected and added to pNewBlobList, else 0.
 */
int CvBlobDetectorCC::DetectNewBlob(IplImage* /*pImg*/, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width,pFGMask->height);

    /* Shift blob list: */
    {
        int     i;
        if(m_pBlobLists[SEQ_SIZE-1]) delete m_pBlobLists[SEQ_SIZE-1];

        for(i=SEQ_SIZE-1; i>0; --i)  m_pBlobLists[i] = m_pBlobLists[i-1];

        m_pBlobLists[0] = new CvBlobSeq;

    }   /* Shift blob list. */

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

        if(m_Clastering)
        {   /* Glue contours: */
            cvFindBlobsByCCClasters(pFGMask, &Blobs, storage );
        }   /* Glue contours. */
        else
        { /* One blob per external contour: */
            IplImage*       pIB = cvCloneImage(pFGMask);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
            cvFindContours(pIB,storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt=cnt->h_next)
            {
                CvBlob  NewBlob;
                /* Image moments: */
                double      M00,X,Y,XX,YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;
                /* Reject contours below the minimum relative size: */
                if(r.height < S.height*m_HMin || r.width < S.width*m_WMin) continue;
                cvMoments( cvGetSubRect(pFGMask,&mat,r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );
                if(M00 <= 0 ) continue;
                X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
                XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
                /* Blob size is four standard deviations along each axis. */
                NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
                Blobs.AddBlob(&NewBlob);

            }   /* Next contour. */

            cvReleaseImage(&pIB);

        }   /* One contour - one blob. */

        {   /* Delete small and intersected blobs: */
            int i;
            for(i=Blobs.GetBlobNum(); i>0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*m_HMin || pB->w < S.width*m_WMin)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }

                if(pOldBlobList)
                {
                    int j;
                    for(j=pOldBlobList->GetBlobNum(); j>0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        if((fabs(pBOld->x-pB->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y-pB->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pB))))
                        {   /* Intersection detected, delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /*  if pOldBlobList. */
            }   /*  Check next blob. */
        }   /*  Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size (largest first): */
            int N = Blobs.GetBlobNum();
            int i,j;
            for(i=1; i<N; ++i)
            {
                for(j=i; j>0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP)break;
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: */
            for(i=0; i<MIN(N,10); ++i)
            {
                m_pBlobLists[0]->AddBlob(Blobs.GetBlob(i));
            }

        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);

    }   /* Create blobs. */

    {   /* Shift each track: */
        int j;
        for(j=0; j<m_TrackNum; ++j)
        {
            int     i;
            DefSeq* pTrack = m_TrackSeq+j;

            for(i=SEQ_SIZE-1; i>0; --i)
                pTrack->pBlobs[i] = pTrack->pBlobs[i-1];

            pTrack->pBlobs[0] = NULL;
            if(pTrack->size == SEQ_SIZE)pTrack->size--;
        }
    }   /* Shift each track. */

    /* Analyze blob list to find best blob trajectory: */
    {
        double      BestError = -1;
        int         BestTrack = -1;;
        CvBlobSeq*  pNewBlobs = m_pBlobLists[0];
        int         i;
        int         NewTrackNum = 0;
        /* Assign each new blob to every compatible track; a blob matching
         * several tracks duplicates them, an unmatched blob starts a new one. */
        for(i=pNewBlobs->GetBlobNum(); i>0; --i)
        {
            CvBlob* pBNew = pNewBlobs->GetBlob(i-1);
            int     j;
            int     AsignedTrack = 0;
            for(j=0; j<m_TrackNum; ++j)
            {
                double  dx,dy;
                DefSeq* pTrack = m_TrackSeq+j;
                CvBlob* pLastBlob = pTrack->size>0?pTrack->pBlobs[1]:NULL;
                if(pLastBlob == NULL) continue;
                dx = fabs(CV_BLOB_X(pLastBlob)-CV_BLOB_X(pBNew));
                dy = fabs(CV_BLOB_Y(pLastBlob)-CV_BLOB_Y(pBNew));
                if(dx > 2*CV_BLOB_WX(pLastBlob) || dy > 2*CV_BLOB_WY(pLastBlob)) continue;
                AsignedTrack++;

                if(pTrack->pBlobs[0]==NULL)
                {   /* Fill existed track: */
                    pTrack->pBlobs[0] = pBNew;
                    pTrack->size++;
                }
                else if((m_TrackNum+NewTrackNum)<SEQ_NUM)
                {   /* Duplicate existed track: */
                    m_TrackSeq[m_TrackNum+NewTrackNum] = pTrack[0];
                    m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                    NewTrackNum++;
                }
            }   /* Next track. */

            if(AsignedTrack==0 && (m_TrackNum+NewTrackNum)<SEQ_NUM )
            {   /* Initialize new track: */
                m_TrackSeq[m_TrackNum+NewTrackNum].size = 1;
                m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                NewTrackNum++;
            }
        }   /* Next new blob. */

        m_TrackNum += NewTrackNum;

        /* Check each track: */
        for(i=0; i<m_TrackNum; ++i)
        {
            int     Good = 1;
            DefSeq* pTrack = m_TrackSeq+i;
            CvBlob* pBNew = pTrack->pBlobs[0];
            /* Only full-length tracks with a current blob are candidates: */
            if(pTrack->size != SEQ_SIZE) continue;
            if(pBNew == NULL ) continue;

            /* Check intersection last blob with existed: */
            if(Good && pOldBlobList)
            {
                int k;
                for(k=pOldBlobList->GetBlobNum(); k>0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x-pBNew->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y-pBNew->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection last blob with existed. */

            /* Check distance to image border: */
            if(Good)
            {   /* Check distance to image border: */
                float    dx = MIN(pBNew->x,S.width-pBNew->x)/CV_BLOB_RX(pBNew);
                float    dy = MIN(pBNew->y,S.height-pBNew->y)/CV_BLOB_RY(pBNew);
                if(dx < m_MinDistToBorder || dy < m_MinDistToBorder) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: */
            if(Good)
            {   /* Fit x(t) = a*t + b per axis by least squares (closed form)
                 * and measure the RMS deviation of the track from the line: */
                double      Error = 0;
                int         N = pTrack->size;
                CvBlob**    pBL = pTrack->pBlobs;
                float       sum[2] = {0,0};
                float       jsum[2] = {0,0};
                float       a[2],b[2]; /* estimated parameters of moving x(t) = a*t+b*/
                int         j;

                for(j=0; j<N; ++j)
                {
                    float   x = pBL[j]->x;
                    float   y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                a[0] = 6*((1-N)*sum[0]+2*jsum[0])/(N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0])/(N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1])/(N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1])/(N*(N+1));

                for(j=0; j<N; ++j)
                {
                    Error +=
                        pow(a[0]*j+b[0]-pBL[j]->x,2)+
                        pow(a[1]*j+b[1]-pBL[j]->y,2);
                }

                Error = sqrt(Error/N);

                /* Reject tracks that wobble or move implausibly fast: */
                if( Error > S.width*0.01 ||
                    fabs(a[0])>S.width*0.1 ||
                    fabs(a[1])>S.height*0.1)
                    Good = 0;

                /* New best trajectory: */
                if(Good && (BestError == -1 || BestError > Error))
                {   /* New best trajectory: */
                    BestTrack = i;
                    BestError = Error;
                }   /* New best trajectory. */
            }   /*  Check uniform motion. */
        }   /*  Next track. */

        #if 0
        {   /**/
            printf("BlobDetector configurations = %d [",m_TrackNum);
            int i;
            for(i=0; i<SEQ_SIZE; ++i)
            {
                printf("%d,",m_pBlobLists[i]?m_pBlobLists[i]->GetBlobNum():0);
            }
            printf("]\n");
        }
        #endif

        if(BestTrack >= 0)
        {   /* Put new blob to output and delete from blob list: */
            assert(m_TrackSeq[BestTrack].size == SEQ_SIZE);
            assert(m_TrackSeq[BestTrack].pBlobs[0]);
            pNewBlobList->AddBlob(m_TrackSeq[BestTrack].pBlobs[0]);
            m_TrackSeq[BestTrack].pBlobs[0] = NULL;
            m_TrackSeq[BestTrack].size--;
            result = 1;
        }   /* Put new blob to output and mark in blob list to delete. */
    }   /*  Analyze blod list to find best blob trajectory. */

    {   /* Delete bad tracks: */
        int i;
        for(i=m_TrackNum-1; i>=0; --i)
        {   /* Delete bad tracks: */
            if(m_TrackSeq[i].pBlobs[0]) continue;
            if(m_TrackNum>0)
                m_TrackSeq[i] = m_TrackSeq[--m_TrackNum];
        }   /* Delete bad tracks: */
    }

#ifdef USE_OBJECT_DETECTOR
    if( m_split_detector && pNewBlobList->GetBlobNum() > 0 )
    {
        int num_new_blobs = pNewBlobList->GetBlobNum();
        int i = 0;

        if( m_roi_seq ) cvClearSeq( m_roi_seq );
        m_debug_blob_seq.Clear();
        /* Run the object detector on a scaled ROI around each new blob and
         * split blobs that the detector sees as several objects: */
        for( i = 0; i < num_new_blobs; ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvMat roi_stub;
            CvMat* roi_mat = 0;
            CvMat* scaled_roi_mat = 0;

            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 0 );
            m_debug_blob_seq.AddBlob(&d_b);

            float scale = m_param_roi_scale * m_min_window_size.height / CV_BLOB_WY(b);

            float b_width =   MAX(CV_BLOB_WX(b), m_min_window_size.width / scale)
                            + (m_param_roi_scale - 1.0F) * (m_min_window_size.width / scale)
                            + 2.0F * m_max_border / scale;
            float b_height = CV_BLOB_WY(b) * m_param_roi_scale + 2.0F * m_max_border / scale;

            CvRect roi = cvRectIntersection( cvRect( cvFloor(CV_BLOB_X(b) - 0.5F*b_width),
                                                     cvFloor(CV_BLOB_Y(b) - 0.5F*b_height),
                                                     cvCeil(b_width), cvCeil(b_height) ),
                                             cvRect( 0, 0, pImg->width, pImg->height ) );
            if( roi.width <= 0 || roi.height <= 0 )
                continue;

            if( m_roi_seq ) cvSeqPush( m_roi_seq, &roi );

            roi_mat = cvGetSubRect( pImg, &roi_stub, roi );
            scaled_roi_mat = cvCreateMat( cvCeil(scale*roi.height), cvCeil(scale*roi.width), CV_8UC3 );
            cvResize( roi_mat, scaled_roi_mat );

            m_detected_blob_seq.Clear();
            m_split_detector->Detect( scaled_roi_mat, &m_detected_blob_seq );
            cvReleaseMat( &scaled_roi_mat );

            for( int k = 0; k < m_detected_blob_seq.GetBlobNum(); ++k )
            {
                CvDetectedBlob* b = (CvDetectedBlob*) m_detected_blob_seq.GetBlob(k);

                /* scale and shift each detected blob back to the original image coordinates */
                CV_BLOB_X(b) = CV_BLOB_X(b) / scale + roi.x;
                CV_BLOB_Y(b) = CV_BLOB_Y(b) / scale + roi.y;
                CV_BLOB_WX(b) /= scale;
                CV_BLOB_WY(b) /= scale;

                CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 1,
                        b->response );
                m_debug_blob_seq.AddBlob(&d_b);
            }

            if( m_detected_blob_seq.GetBlobNum() > 1 )
            {
                /*
                 * Split blob.
                 * The original blob is replaced by the first detected blob,
                 * remaining detected blobs are added to the end of the sequence:
                 */
                CvBlob* first_b = m_detected_blob_seq.GetBlob(0);
                CV_BLOB_X(b)  = CV_BLOB_X(first_b);  CV_BLOB_Y(b)  = CV_BLOB_Y(first_b);
                CV_BLOB_WX(b) = CV_BLOB_WX(first_b); CV_BLOB_WY(b) = CV_BLOB_WY(first_b);

                for( int j = 1; j < m_detected_blob_seq.GetBlobNum(); ++j )
                {
                    CvBlob* detected_b = m_detected_blob_seq.GetBlob(j);
                    pNewBlobList->AddBlob(detected_b);
                }
            }
        }   /* For each new blob. */

        for( i = 0; i < pNewBlobList->GetBlobNum(); ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 2 );
            m_debug_blob_seq.AddBlob(&d_b);
        }
    }   // if( m_split_detector )
#endif

    return result;

}   /* cvDetectNewBlob */
Пример #15
0
CvRect GetMinRect(IplImage* srcImg, CvScalar bgColor/*=CV_RGB(0,0,0)*/)//返回图像的外包围矩形范围
{
	CvRect roiRc=cvRect(0,0,0,0);
	if(!CV_IS_IMAGE(srcImg) )
	{
		return roiRc;
	}
	bool isGetTop=false,isGetBottom=false,isGetLeft=false,isGetRight=false;//是否得到各边界
	for(int i=0; i<srcImg->height; ++i){
		for(int top=0; top<srcImg->width && !isGetTop; ++top){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + i*srcImg->widthStep)[top*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.y=i;//得到最顶行
				isGetTop=true;
				break;
			}
		}
	}
	for(int i=srcImg->height-1; i>0; --i){
		for(int bottom=0; bottom < srcImg->width && !isGetBottom; ++bottom){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + i*srcImg->widthStep)[bottom*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.height=i;//得到最低行
				isGetBottom=true;
				break;
			}
		}
	}

	for(int i=0; i<srcImg->width; ++i){
		for(int left=0; left<srcImg->height && !isGetLeft; ++left){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + left*srcImg->widthStep)[i*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.x=i;//得到最左列
				isGetLeft=true;
				break;
			}
		}
	}
	for(int i=srcImg->width - 1; i>0; --i)
	{
		for(int right=0; right<srcImg->height && !isGetRight; ++right){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + right*srcImg->widthStep)[i*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.width=i;//得到最右列
				isGetRight=true;
				break;
			}
		}
	}
	if (!isGetLeft || !isGetRight || !isGetTop || !isGetBottom)
	{
		return roiRc;//没找到目标,全部都是背景
	}
	roiRc.width=roiRc.width-roiRc.x+1;
	roiRc.height=roiRc.height-roiRc.y+1;

	if (roiRc.x+roiRc.width>srcImg->width)
	{
		roiRc.width--;
	}
	if (roiRc.y+roiRc.height>srcImg->height)
	{
		roiRc.height--;
	}
	return roiRc;

}
Пример #16
0
/*
 * Recursively suppress horizontally asymmetric detail in pImg, in place.
 *
 * The image is split into left/right halves; each half is processed
 * recursively (loop-1 levels deep), then the right half is mirrored and
 * compared pixel-by-pixel (channel 0 only) against the left half.  Pixels
 * whose mirror difference exceeds the tolerance T are blacked out on both
 * sides, and the halves are written back into pImg.
 *
 * @param pImg  8-bit image, modified in place (must stay valid).
 * @param loop  recursion depth; 0 means "leave the image untouched".
 * @return      pImg itself (the function never allocates a new result).
 */
static IplImage*  symmetry_img(IplImage* pImg, int loop)
{
	// Per-pixel symmetry tolerance (applied to channel 0 only).
	const int T = 10;
	int i = 0;
	int j = 0;
	IplImage* I = pImg;
	IplImage* IL;
	IplImage* IR;
	IplImage* IRf;
	int width = cvGetSize(pImg).width;
	int height = cvGetSize(pImg).height;
	int rect_width = width / 2;
	int rect_height = height;

	if (loop == 0)
	{
		return pImg;
	}

	// x offset of the right half.
	// NOTE(review): for odd widths the middle column belongs to neither
	// half and is never touched -- confirm that is intended.
	int X = width / 2;

	// Split the image into left (IL) and right (IR) halves.
	cvSetImageROI(I, cvRect(0, 0, rect_width, rect_height));
	IL = cvCreateImage(cvSize(rect_width, rect_height), IPL_DEPTH_8U, pImg->nChannels);
	cvCopy(I, IL, 0);
	cvResetImageROI(I);

	cvSetImageROI(I, cvRect(X, 0, rect_width, rect_height));
	IR = cvCreateImage(cvSize(rect_width, rect_height), IPL_DEPTH_8U, pImg->nChannels);
	cvCopy(I, IR, 0);
	cvResetImageROI(I);

	// Recurse on each half.  symmetry_img() modifies its argument in
	// place and returns the same pointer, so IL/IR stay valid for the
	// releases at the bottom.
	IL = symmetry_img(IL, loop - 1);
	IR = symmetry_img(IR, loop - 1);

	// Mirror the right half and compare it pixel-by-pixel with the left.
	IRf = cvCreateImage(cvGetSize(IR), IPL_DEPTH_8U, IR->nChannels);
	cvFlip(IR, IRf, 1);
	for (i = 0; i < IR->height; i++)
	{
		for (j = 0; j < IR->width; j++)
		{
			double value1 = cvGet2D(IL, i, j).val[0];
			double value2 = cvGet2D(IRf, i, j).val[0];
			// Compare the full double-precision difference.  The
			// original used abs(), which converts the double
			// difference to int (truncating it) before taking the
			// absolute value.
			if (value1 - value2 > T || value2 - value1 > T)
			{
				cvSet2D(IL,  i, j, cvScalar(0, 0, 0, 0));
				cvSet2D(IRf, i, j, cvScalar(0, 0, 0, 0));
			}
		}
	}

	// Write both (possibly blacked-out) halves back into the input image.
	cvSetImageROI(I, cvRect(0, 0, rect_width, rect_height));
	cvCopy(IL, I, NULL);
	cvResetImageROI(I);

	cvSetImageROI(I, cvRect(X, 0, rect_width, rect_height));
	cvFlip(IRf, IR, 1);  // un-mirror before copying back
	cvCopy(IR, I, 0);
	cvResetImageROI(I);

	cvReleaseImage(&IL);
	cvReleaseImage(&IR);
	cvReleaseImage(&IRf);
	return I;
}
Пример #17
0
int main(int argc, char *argv[])
{

    /////
    /*
        CvVideoWriter *writer = 0;
    int isColor = 1;
    int fps     = 25;  // or 30
    int frameW  = 6totalnumberofpoints; // 744 for firewire cameras
    int frameH  = 480; // 480 for firewire cameras
    writer=cvCreateVideoWriter("out.avi",-1,
                         fps,cvSize(frameW,frameH),isColor);
     */
    /////
    opencvWebcam webcam;
    cvNamedWindow("template");
    cvNamedWindow("carimage");

    cvNamedWindow("image");
    cvSetMouseCallback( "image", on_mouse, 0 );

    int counter=0;

    LKInverseComp lk("./aam.template");
    lk.setParamtxty(175,140);

    cvShowImage( "image",camImage  );


    static CvMat* newMat=cvCreateMat(1,totalnumberofpoints,CV_64FC1);
    int frameIndex=0;
    int minFrameIndex=0;
    int stepSize=1;
    double min=0;
    CvVideoWriter *writer = 0;
    int isColor = 1;
    int fps     = 25;  // or 30
    int frameW  = 1280;// 744 for firewire cameras
    int frameH  = 480; // for firewire cameras
    //writer=cvCreateVideoWriter("out1.mpg",-1,
    //                           fps,cvSize(frameW,frameH),isColor);
    writer=cvCreateVideoWriter("out1.avi",CV_FOURCC('D', 'I', 'V', 'X'),
                               fps,cvSize(frameW,frameH),isColor);
    int flag=0;
    if (webcam.startCamera()==1)
    {
        camImage = webcam.queryFrame();

        camImage = webcam.queryFrame();

        camImage = webcam.queryFrame();

        camImage = webcam.queryFrame();
        camImage = webcam.queryFrame();
        camImage = webcam.queryFrame();

        camImage = webcam.queryFrame();
        camImage = webcam.queryFrame();
        for (;;)
        {
            double t = (double)cvGetTickCount();
            double t1=0;
            //backGroundSubtraction = webcam.queryFrame();

            camImage = webcam.queryFrame();
            if (camImage==NULL)
                break;
            frameIndex++;
            CvSize photo_size = cvSize(camImage->width,camImage->height);
            grayIterateImage = cvCreateImage(photo_size, IPL_DEPTH_8U, 1);
            static IplImage * colorresized =cvCreateImage(cvSize(1280,480), IPL_DEPTH_8U, 3);
            static IplImage * carricatureImg = cvCreateImage(photo_size, IPL_DEPTH_8U, 3);
            cvZero(carricatureImg);
            cvConvertScale(carricatureImg,carricatureImg,0,255);
            cvCvtColor(camImage, grayIterateImage, CV_RGB2GRAY);
            iterateImage = cvCreateImage(photo_size, IPL_DEPTH_8U, 1);
            cvCopy(grayIterateImage, iterateImage);
            //cvShowImage( "image",camImage);

            //    if (flag==0)
            newMat =lk.currentEstimate();



            for (int i = 1; i <= lk.newDelaunay->totalNumberofTriangles; i++ )
            {


                int node1=lk.newDelaunay->colorTriangleCodeFixed[i][0];
                int node2=lk.newDelaunay->colorTriangleCodeFixed[i][1];
                int node3=lk.newDelaunay->colorTriangleCodeFixed[i][2];

                CvScalar s1,s2;
                s1 =cvGet2D(newMat,0,2*(node1));
                s2 =cvGet2D(newMat,0,2*(node1) + 1);

                CvPoint p1,p2,p3;

                p1=cvPoint(s1.val[0],s2.val[0]);

                s1 =cvGet2D(newMat,0,2*(node2));
                s2 =cvGet2D(newMat,0,2*(node2) + 1);
                p2=cvPoint(s1.val[0],s2.val[0]);


                s1 =cvGet2D(newMat,0,2*(node3));
                s2 =cvGet2D(newMat,0,2*(node3) + 1);
                p3=cvPoint(s1.val[0],s2.val[0]);
                cvLine(camImage, p1, p2, cvScalar(0,255,0), 1);
                cvLine(camImage, p2, p3, cvScalar(0,255,0), 1);
                cvLine(camImage, p1, p3, cvScalar(0,255,0), 1);



            }
            CvScalar px1,px2,px3,px4, py1,py2,py3,py4,px5,py5;
            CvPoint p1[4],p2[9],p3[15];

            px1 =cvGet2D(newMat,0,2*(29));
            py1 =cvGet2D(newMat,0,2*(29) + 1);
            px2 =cvGet2D(newMat,0,2*(30));
            py2 =cvGet2D(newMat,0,2*(30) + 1);

            px3 =cvGet2D(newMat,0,2*(31));
            py3 =cvGet2D(newMat,0,2*(31) + 1);


            px4 =cvGet2D(newMat,0,2*(32));
            py4 =cvGet2D(newMat,0,2*(32) + 1);


            px5 =cvGet2D(newMat,0,2*(33));
            py5 =cvGet2D(newMat,0,2*(33) + 1);

            p1[0].x=(cvGet2D(newMat,0,2*(29))).val[0];
            p1[0].y=py1.val[0];
            p1[1].x=px2.val[0];
            p1[1].y=py2.val[0];
            p1[2].x=px3.val[0];
            p1[2].y=py3.val[0];
            p1[3].x=px4.val[0];
            p1[3].y=py4.val[0];
            p1[4].x=px5.val[0];
            p1[4].y=py5.val[0];


            drawCRSpline(carricatureImg,p1,5);



            px1 =cvGet2D(newMat,0,2*(34));
            py1 =cvGet2D(newMat,0,2*(34) + 1);
            px2 =cvGet2D(newMat,0,2*(35));
            py2 =cvGet2D(newMat,0,2*(35) + 1);

            px3 =cvGet2D(newMat,0,2*(36));
            py3 =cvGet2D(newMat,0,2*(36) + 1);


            px4 =cvGet2D(newMat,0,2*(37));
            py4 =cvGet2D(newMat,0,2*(37) + 1);


            px5 =cvGet2D(newMat,0,2*(38));
            py5 =cvGet2D(newMat,0,2*(38) + 1);

            p1[0].x=px1.val[0];
            p1[0].y=py1.val[0];
            p1[1].x=px2.val[0];
            p1[1].y=py2.val[0];
            p1[2].x=px3.val[0];
            p1[2].y=py3.val[0];
            p1[3].x=px4.val[0];
            p1[3].y=py4.val[0];
            p1[4].x=px5.val[0];
            p1[4].y=py5.val[0];

            drawCRSpline(carricatureImg,p1,5);

            p1[0].x=(cvGet2D(newMat,0,2*(39))).val[0];
            p1[0].y=(cvGet2D(newMat,0,2*(39) + 1)).val[0];
            p1[1].x=(cvGet2D(newMat,0,2*(40))).val[0];
            p1[1].y=(cvGet2D(newMat,0,2*(40) + 1)).val[0];
            p1[2].x=(cvGet2D(newMat,0,2*(41))).val[0];
            p1[2].y=(cvGet2D(newMat,0,2*(41) + 1)).val[0];
            p1[3].x=(cvGet2D(newMat,0,2*(42))).val[0];
            p1[3].y=(cvGet2D(newMat,0,2*(42) + 1)).val[0];
            p1[4].x=(cvGet2D(newMat,0,2*(43))).val[0];
            p1[4].y=(cvGet2D(newMat,0,2*(43) + 1)).val[0];

            drawCRSpline(carricatureImg,p1,5);


            p1[0].x=(cvGet2D(newMat,0,2*(39))).val[0];
            p1[0].y=(cvGet2D(newMat,0,2*(39) + 1)).val[0];
            p1[1].x=(cvGet2D(newMat,0,2*(46))).val[0];
            p1[1].y=(cvGet2D(newMat,0,2*(46) + 1)).val[0];
            p1[2].x=(cvGet2D(newMat,0,2*(45))).val[0];
            p1[2].y=(cvGet2D(newMat,0,2*(45) + 1)).val[0];
            p1[3].x=(cvGet2D(newMat,0,2*(44))).val[0];
            p1[3].y=(cvGet2D(newMat,0,2*(44) + 1)).val[0];
            p1[4].x=(cvGet2D(newMat,0,2*(43))).val[0];
            p1[4].y=(cvGet2D(newMat,0,2*(43) + 1)).val[0];

            drawCRSpline(carricatureImg,p1,5);



            p1[0].x=(cvGet2D(newMat,0,2*(39))).val[0];
            p1[0].y=(cvGet2D(newMat,0,2*(39) + 1)).val[0];
            p1[1].x=(cvGet2D(newMat,0,2*(58))).val[0];
            p1[1].y=(cvGet2D(newMat,0,2*(58) + 1)).val[0];
            p1[2].x=(cvGet2D(newMat,0,2*(59))).val[0];
            p1[2].y=(cvGet2D(newMat,0,2*(59) + 1)).val[0];
            p1[3].x=(cvGet2D(newMat,0,2*(60))).val[0];
            p1[3].y=(cvGet2D(newMat,0,2*(60) + 1)).val[0];
            p1[4].x=(cvGet2D(newMat,0,2*(43))).val[0];
            p1[4].y=(cvGet2D(newMat,0,2*(43) + 1)).val[0];

            drawCRSpline(carricatureImg,p1,5);



            p1[0].x=(cvGet2D(newMat,0,2*(39))).val[0];
            p1[0].y=(cvGet2D(newMat,0,2*(39) + 1)).val[0];
            p1[1].x=(cvGet2D(newMat,0,2*(61))).val[0];
            p1[1].y=(cvGet2D(newMat,0,2*(61) + 1)).val[0];
            p1[2].x=(cvGet2D(newMat,0,2*(62))).val[0];
            p1[2].y=(cvGet2D(newMat,0,2*(62) + 1)).val[0];
            p1[3].x=(cvGet2D(newMat,0,2*(43))).val[0];
            p1[3].y=(cvGet2D(newMat,0,2*(43) + 1)).val[0];
            p1[4].x=(cvGet2D(newMat,0,2*(43))).val[0];
            p1[4].y=(cvGet2D(newMat,0,2*(43) + 1)).val[0];

            drawCRSpline(carricatureImg,p1,4);




            p3[0].x=(cvGet2D(newMat,0,2*(0))).val[0];
            p3[0].y=(cvGet2D(newMat,0,2*(0) + 1)).val[0];

            p3[1].x=(cvGet2D(newMat,0,2*(4))).val[0];
            p3[1].y=(cvGet2D(newMat,0,2*(4) + 1)).val[0];

            p3[2].x=(cvGet2D(newMat,0,2*(6))).val[0];
            p3[2].y=(cvGet2D(newMat,0,2*(6) + 1)).val[0];

            p3[3].x=(cvGet2D(newMat,0,2*(8))).val[0];
            p3[3].y=(cvGet2D(newMat,0,2*(8) + 1)).val[0];



            p3[4].x=(cvGet2D(newMat,0,2*(12))).val[0];
            p3[4].y=(cvGet2D(newMat,0,2*(12) + 1)).val[0];


            drawCRSpline(carricatureImg,p3,5,1);



            p3[0].x=(cvGet2D(newMat,0,2*(21))).val[0];
            p3[0].y=(cvGet2D(newMat,0,2*(21) + 1)).val[0];

            p3[1].x=(cvGet2D(newMat,0,2*(22))).val[0];
            p3[1].y=(cvGet2D(newMat,0,2*(22) + 1)).val[0];

            p3[2].x=(cvGet2D(newMat,0,2*(23))).val[0];
            p3[2].y=(cvGet2D(newMat,0,2*(23) + 1)).val[0];

            p3[3].x=(cvGet2D(newMat,0,2*(24))).val[0];
            p3[3].y=(cvGet2D(newMat,0,2*(24) + 1)).val[0];



            p3[4].x=(cvGet2D(newMat,0,2*(25))).val[0];
            p3[4].y=(cvGet2D(newMat,0,2*(25) + 1)).val[0];

            p3[5].x=(cvGet2D(newMat,0,2*(26))).val[0];
            p3[5].y=(cvGet2D(newMat,0,2*(26) + 1)).val[0];






            p3[6].x=(cvGet2D(newMat,0,2*(21))).val[0];
            p3[6].y=(cvGet2D(newMat,0,2*(21) + 1)).val[0];

            drawCRSpline(carricatureImg,p3,7,1);


            p3[0].x=(cvGet2D(newMat,0,2*(13))).val[0];
            p3[0].y=(cvGet2D(newMat,0,2*(13) + 1)).val[0];

            p3[1].x=(cvGet2D(newMat,0,2*(14))).val[0];
            p3[1].y=(cvGet2D(newMat,0,2*(14) + 1)).val[0];

            p3[2].x=(cvGet2D(newMat,0,2*(15))).val[0];
            p3[2].y=(cvGet2D(newMat,0,2*(15) + 1)).val[0];

            p3[3].x=(cvGet2D(newMat,0,2*(16))).val[0];
            p3[3].y=(cvGet2D(newMat,0,2*(16) + 1)).val[0];



            p3[4].x=(cvGet2D(newMat,0,2*(17))).val[0];
            p3[4].y=(cvGet2D(newMat,0,2*(17) + 1)).val[0];

            p3[5].x=(cvGet2D(newMat,0,2*(18))).val[0];
            p3[5].y=(cvGet2D(newMat,0,2*(18) + 1)).val[0];


            p3[6].x=(cvGet2D(newMat,0,2*(13))).val[0];
            p3[6].y=(cvGet2D(newMat,0,2*(13) + 1)).val[0];

            drawCRSpline(carricatureImg,p3,7,1);




            p3[0].x=(cvGet2D(newMat,0,2*(48))).val[0];
            p3[0].y=(cvGet2D(newMat,0,2*(48) + 1)).val[0];

            p3[1].x=(cvGet2D(newMat,0,2*(51))).val[0];
            p3[1].y=(cvGet2D(newMat,0,2*(51) + 1)).val[0];

            p3[2].x=(cvGet2D(newMat,0,2*(52))).val[0];
            p3[2].y=(cvGet2D(newMat,0,2*(52) + 1)).val[0];

            p3[3].x=(cvGet2D(newMat,0,2*(53))).val[0];
            p3[3].y=(cvGet2D(newMat,0,2*(53) + 1)).val[0];



            p3[4].x=(cvGet2D(newMat,0,2*(56))).val[0];
            p3[4].y=(cvGet2D(newMat,0,2*(56) + 1)).val[0];



            drawCRSpline(carricatureImg,p3,5,1);
//    /   else
            //      flag=0;
            for (int i=0;i<numberofpoints;i++)
            {
                CvScalar s1,s2;
                s1 =cvGet2D(newMat,0,2*i);
                s2 =cvGet2D(newMat,0,2*i + 1);
                //printf("%e %e \n",s1.val[0],s2.val[0]);
                cvCircle( camImage, cvPoint((int)s1.val[0],(int)s2.val[0]), 1, CV_RGB(255,(0),0), 1 );


            }

            lk.setImage(iterateImage);
            // cvWaitKey(-1);
            // printf("ITERATE STARTED \n");
            double timer= (double)cvGetTickCount();

            for (int i=0;i<60;i++)
            {

                double er = lk.iterate();

                if ((i==0 && frameIndex==1) || ( frameIndex > (stepSize + minFrameIndex) ) )
                {
                    min=er;
                    minFrameIndex=frameIndex;

                    printf("%e \n",min);

                }

                if (min>er)
                {
                    min=er;
                    minFrameIndex=frameIndex;

                }
                else
                {
                    break;
                    flag=1;
                }
            }

            //      timer = (double)cvGetTickCount() - timer;
//printf( "detection time = %gms\n", timer/((double)cvGetTickFrequency()*1000.) );

cvSetImageROI(colorresized,cvRect(0,0,640,480));

            cvResize(camImage,colorresized);
cvResetImageROI(colorresized);

            cvSetImageROI(colorresized,cvRect(639,0,640,480));
            cvResize(carricatureImg,colorresized);
cvResetImageROI(colorresized);
cvResetImageROI(colorresized);

            cvWriteFrame(writer,colorresized);      // add the frame to the file
            cvShowImage( "image",camImage);

            cvShowImage( "carimage",carricatureImg);

            // char a[300];
            // sprintf(a,"/home/rohananil/COP/images/%06d.jpg",counter++);
            //     cvSaveImage(a,camImage);

            //  cvWriteFrame(writer,camImage);      // add the frame to the file
            cvWaitKey(3);
            cvReleaseImage(&camImage);
            cvReleaseImage(&grayIterateImage);
            cvReleaseImage(&iterateImage);
            // cvReleaseImage(&templateImage);

        }
        cvReleaseVideoWriter(&writer);
    }
    cvDestroyWindow("template");
    cvDestroyWindow("image");


}
Пример #18
0
//g++ main3.cpp -o main.o `pkg-config --cflags --libs opencv`
void cvShowManyImages(char* title, int nArgs, ...) {
    printf("\nin the function\n");
    printf("#of args %d",nArgs);
    // img - Used for getting the arguments 
    IplImage *img;
    img = cvLoadImage("/home/ikaros/Pictures/Backtrack_Pure_by_twenty_steps_freedom.jpg");
    // DispImage - the image in which input images are to be copied
    IplImage *DispImage;

    int size;
    int i;
    int m, n;
    int x, y;

    // w - Maximum number of images in a row 
    // h - Maximum number of images in a column 
    int w, h;

    // scale - How much we have to resize the image
    float scale;
    int max;

    // If the number of arguments is lesser than 0 or greater than 12
    // return without displaying 
    if(nArgs <= 0) {
        printf("Number of arguments too small....\n");
        return;
    }
    else if(nArgs > 12) {
        printf("Number of arguments too large....\n");
        return;
    }
    // Determine the size of the image, 
    // and the number of rows/cols 
    // from number of arguments 
    else if (nArgs == 1) {
        w = h = 1;
        size = 300;
    }
    else if (nArgs == 2) {
        w = 2; h = 1;
        size = 300;
    }
    else if (nArgs == 3 || nArgs == 4) {
        w = 2; h = 2;
        size = 300;
    }
    else if (nArgs == 5 || nArgs == 6) {
        w = 3; h = 2;
        size = 200;
    }
    else if (nArgs == 7 || nArgs == 8) {
        w = 4; h = 2;
        size = 200;
    }
    else {
        w = 4; h = 3;
        size = 150;
    }

    // Create a new 3 channel image
    DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );

    // Used to get the arguments passed
    va_list args;
    va_start(args, nArgs);

    // Loop for nArgs number of arguments
    for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {

        // Get the Pointer to the IplImage
       // img = va_arg(args, IplImage*);
	img = cvLoadImage("/home/ikaros/Pictures/Backtrack_Pure_by_twenty_steps_freedom.jpg");
	//printf("\nimg = %d\n",img);
        // Check whether it is NULL or not
        // If it is NULL, release the image, and return
/*        if(img == 0) {
            printf("Invalid arguments");
            cvReleaseImage(&DispImage);
            return;
        }
*/
        // Find the width and height of the image
        x = img->width;
        y = img->height;

        // Find whether height or width is greater in order to resize the image
        max = (x > y)? x: y;

        // Find the scaling factor to resize the image
        scale = (float) ( (float) max / size );

        // Used to Align the images
        if( i % w == 0 && m!= 20) {
            m = 20;
            n+= 20 + size;
        }

        // Set the image ROI to display the current image
        cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));

        // Resize the input image and copy the it to the Single Big Image
        cvResize(img, DispImage);

        // Reset the ROI in order to display the next image
        cvResetImageROI(DispImage);
    }

    // Create a new window, and show the Single Big Image
    cvNamedWindow( title, 1 );
    cvShowImage( title, DispImage);

    cvWaitKey(500);
   // cvDestroyWindow(title);

    // End the number of arguments
    va_end(args);

    // Release the Image Memory
    cvReleaseImage(&DispImage);
}
Пример #19
0
/*
 * Per-frame processing callback.
 *
 * Reads single-key toggles from userdata->key, HSV-filters the current
 * input frame, runs face detection every 10th call (caching the last
 * detected rectangle in between), shows the face sub-image, computes
 * its per-channel HSV histogram maxima, and finally rotates the
 * input/output frame history inside userdata.
 *
 * @param userdata  frame buffers + UI state; NULL is tolerated.
 * @return          always 0.
 *
 * NOTE(review): the static work buffers below are sized from the first
 * frame ever seen -- confirm frame dimensions never change at runtime.
 */
char operateImage(Userdata *userdata) {
	if (!userdata) {
		return 0;
	}

	IplImage *image1 = userdata->input[0];
	IplImage *image2 = userdata->input[1];
	IplImage *imageOut = userdata->output[0];
	IplImage *imageOut2 = userdata->output[1];

	// Mode toggles flipped by keys below.  None of them is read in this
	// function -- presumably consumed elsewhere or leftovers; confirm.
	static int color_mode = 4;
	static int smooth_mode = 0;
	static int otsu_mode = 0;
	static int close_mode = 0;
	static int canny_mode = 0;
	static int contour_mode = 0;
	static int hsv_mode = 0;
	static int save_img = 0;
	static int history_mode = 0;

	int key = userdata->key;
	switch (key) {
	case 'g':
		color_mode++;
		color_mode %= 5;
		break;
	case 's':
		smooth_mode = !smooth_mode;
		break;
	case 'o':
		otsu_mode = !otsu_mode;
		break;
	case 'e':
		close_mode = !close_mode;
		break;
	case 'c':
		canny_mode = !canny_mode;
		break;
	case 'b':
		contour_mode = !contour_mode;
		break;
	case 'h':
		hsv_mode = !hsv_mode;
		break;
	case 'H':
		history_mode = !history_mode;
		break;
	case 'S':
		save_img = 1;
		break;
	default:
		//cout << key << "\n";
		break;
	}

	int value = userdata->value;
	// Force kernel size odd, as required by smoothing/morphology kernels.
	int kernelSize = userdata->kernelSize;
	kernelSize += 1 - (kernelSize % 2);
	int lowThreshold = userdata->lowThreshold;
	int highThreshold = userdata->highThreshold;
	CvScalar minScalar = cvScalar(userdata->minScalar0, userdata->minScalar1, userdata->minScalar2);
	CvScalar maxScalar = cvScalar(userdata->maxScalar0, userdata->maxScalar1, userdata->maxScalar2);

	// tmp1d is allocated but never used in the visible pipeline.
	static IplImage *tmp1d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
	static IplImage *tmp3d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);
	cvCopy(image1, tmp3d, NULL);

	// Keep only pixels inside the user-selected HSV range.
	filterByHSV(tmp3d, minScalar, maxScalar, tmp3d);
	
	// Detect faces only every 10th call; reuse the cached rectangle
	// otherwise.
	static int tick = 0;
	static CvRect face = cvRect(0,0,1,1);
	if ((tick %= 10) == 0) {
		//userdata->timestep = 100;
		double scale = 4;
		CvRect *faces = NULL;
		double t = 0;
		int facesCount = findFaces(tmp3d, &faces, scale, &t);
		face = (facesCount != 0) ? faces[0] : face;
		free(faces);
		//printf("%d face(s) detected in %g ms :: 1st face at {%d,%d,%d,%d}", facesCount, t, face.x, face.y, face.width, face.height);
		drawFaces(tmp3d, 1, &face);
	}
	tick++;

	//face extraction
	IplImage *subimage = createSubArray(tmp3d, face);
	cvNamedWindow(CONTROL_WINDOW  "face", 0);
	cvResizeWindow(CONTROL_WINDOW "face", subimage->width, subimage->height);
	cvShowImage(CONTROL_WINDOW    "face", subimage);

	//face histogram
	IplImage *subimage2 = cvCloneImage(subimage);
	cvCvtColor(subimage2, subimage2, CV_BGR2HSV);
	size_t binsCount = 0;
	size_t *bins = NULL;
	//printf("%d (%p)", binsCount, &bins);
	size_t ret = calcularHistograma(subimage2, &binsCount, &bins);
	//printf(" ret=%d %d (%p)", ret, binsCount, bins);
	// Per-channel argmax of the 256-bin histograms.
	// NOTE(review): maxValues stays uninitialized when bins == NULL; it
	// is only read inside the #if 0 blocks below, so this is currently
	// harmless.
	CvScalar maxValues;
	if (bins) {
		// FIX: the comparison used bins[idx] (always channel 0) instead
		// of bins[i * 256 + idx] when scanning channel i, and CvScalar
		// only has 4 slots, so the write is clamped to 4 channels.
		for (size_t i = 0; i < binsCount && i < 4; i++) {
			size_t idx = 0;
			for (size_t j = 0; j < 256; j++) {
				if (bins[i * 256 + j] > bins[i * 256 + idx]) {
					idx = j;
				}
			}
			maxValues.val[i] = idx;
		}
		free(bins);
	}
#if 0
	if (subimage->width > 10 && subimage->height > 10)
	graficarHistograma(subimage, binsCount, bins);
	cvNamedWindow(CONTROL_WINDOW  "42", 0);
	cvResizeWindow(CONTROL_WINDOW "42", subimage->width, subimage->height);
	cvShowImage(CONTROL_WINDOW    "42", subimage);
#endif

#if 0
	int minH = (int)maxValues.val[0] - 20;
	int maxH = (int)maxValues.val[0] + 20;
	int minS = (int)maxValues.val[1] - 20;
	int maxS = (int)maxValues.val[1] + 20;
	int minV = (int)maxValues.val[2] - 20;
	int maxV = (int)maxValues.val[2] + 20;

	minH = minH < 0 ? 180 - minH : minH;
	maxH = maxH > 180 ? maxH - 180 : maxH;

	printf("%d,%d,%d %d,%d,%d", minH, minS, minV, maxH, maxS, maxV);

	filterByHSV(subimage2, cvScalar(minH, minS, minV, 0), cvScalar(maxH, maxS, maxV, 0), subimage2);
	filterByHSV(subimage2, minScalar, maxScalar, subimage2);
	cvCvtColor(subimage2, subimage2, CV_HSV2BGR);

	cvNamedWindow(CONTROL_WINDOW "41", 0);
	cvResizeWindow(CONTROL_WINDOW "41", subimage2->width, subimage2->height);
	cvShowImage(CONTROL_WINDOW "41", subimage2);
#endif

	cvReleaseImage(&subimage);
	cvReleaseImage(&subimage2);
	
	// Rotate the frame history: current input/output become the
	// "previous" slots, filtered result becomes the current output.
	cvCopy(image1, image2, NULL);
	cvCopy(imageOut, imageOut2, NULL);
	cvCopy(tmp3d, imageOut, NULL);

	//cvReleaseImage(&tmp1d);
	//cvReleaseImage(&tmp3d);

	//afterProcess(userdata);

	printf("\r");
	return 0;
}
Пример #20
0
/*
 * Compute the log-magnitude Fourier spectrum of a single-channel image.
 *
 * Returns a newly allocated IPL_DEPTH_64F image (caller releases it)
 * holding log(1 + |DFT(im)|) with the zero-frequency term shifted to
 * the image center.  All intermediate buffers are released here.
 */
IplImage*
create_fourier_image(const IplImage *im)
{

  // Build a complex (re + i*0) copy of the input.
  IplImage *re_plane = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  IplImage *im_plane = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
  IplImage *complex_src = rb_cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

  cvScale(im, re_plane, 1.0, 0.0);
  cvZero(im_plane);
  cvMerge(re_plane, im_plane, NULL, NULL, complex_src);

  // Pad up to DFT-friendly dimensions.
  const int dft_rows = cvGetOptimalDFTSize( im->height - 1 );
  const int dft_cols = cvGetOptimalDFTSize( im->width - 1 );

  CvMat *spectrum = rb_cvCreateMat( dft_rows, dft_cols, CV_64FC2 );
  IplImage *mag_re = rb_cvCreateImage( cvSize(dft_cols, dft_rows), IPL_DEPTH_64F, 1);
  IplImage *mag_im = rb_cvCreateImage( cvSize(dft_cols, dft_rows), IPL_DEPTH_64F, 1);

  // Place the source in the top-left corner of the padded matrix and
  // zero the right-hand margin.
  CvMat patch;
  cvGetSubRect( spectrum, &patch, cvRect(0,0, im->width, im->height));
  cvCopy( complex_src, &patch, NULL );
  if( spectrum->cols > im->width )
  {
    cvGetSubRect( spectrum, &patch, cvRect(im->width,0, spectrum->cols - im->width, im->height));
    cvZero( &patch );
  }

  // The bottom margin needs no zeroing: the nonzero_rows argument tells
  // cvDFT how many rows actually carry data.
  cvDFT( spectrum, spectrum, CV_DXT_FORWARD, complex_src->height );

  // Separate real and imaginary parts of the spectrum.
  cvSplit( spectrum, mag_re, mag_im, 0, 0 );

  // Magnitude: sqrt(Re^2 + Im^2), accumulated into mag_re.
  cvPow( mag_re, mag_re, 2.0);
  cvPow( mag_im, mag_im, 2.0);
  cvAdd( mag_re, mag_im, mag_re, NULL);
  cvPow( mag_re, mag_re, 0.5 );

  // log(1 + Mag) compresses the dynamic range for display.
  cvAddS( mag_re, cvScalarAll(1.0), mag_re, NULL );
  cvLog( mag_re, mag_re );

  // Swap quadrants so the origin (DC term) sits at the image center.
  cvShiftDFT( mag_re, mag_re );

  cvReleaseImage(&re_plane);
  cvReleaseImage(&im_plane);
  cvReleaseImage(&complex_src);
  cvReleaseImage(&mag_im);

  cvReleaseMat(&spectrum);

  return mag_re;

}
Пример #21
0
/* コールバック関数 */
void on_mouse(int event, int x, int y, int flags, void *param = NULL) {
	char str[64];
	IplImage *dst_img1;
	IplImage *dst_img2;
	
	// (4)マウスイベントを取得
	switch (event) {
	case CV_EVENT_MOUSEMOVE:
		snprintf(str, 64, "(%d,%d) %s", x, y, "MOUSE_MOVE");
		if (drawing_box) {
			rect.width = x - rect.x;
			rect.height = y - rect.y;
		}
		break;
	case CV_EVENT_LBUTTONDOWN:
		snprintf(str, 64, "(%d,%d) %s", x, y, "LBUTTON_DOWN");
		drawing_box = true;
		rect = cvRect(x, y, 0, 0);
		break;
	case CV_EVENT_LBUTTONUP:
		snprintf(str, 64, "(%d,%d) %s", x, y, "LBUTTON_UP");
		drawing_box = false;
		if (rect.width<0) {
			rect.x += rect.width;
			rect.width *= -1;
		}
		if (rect.height<0) {
			rect.y += rect.height;
			rect.height *= -1;
		}
		if (rect.width !=0 && rect.height!=0) {
			cut(dst_img2);
		}
		break;
	case CV_EVENT_RBUTTONDOWN:
		snprintf(str, 64, "(%d,%d) %s", x, y, "RBUTTON_DOWN");
		break;
	case CV_EVENT_MBUTTONDOWN:
		snprintf(str, 64, "(%d,%d) %s", x, y, "MBUTTON_DOWN");
		break;
	case CV_EVENT_RBUTTONUP:
		snprintf(str, 64, "(%d,%d) %s", x, y, "RBUTTON_UP");
		break;
	case CV_EVENT_MBUTTONUP:
		snprintf(str, 64, "(%d,%d) %s", x, y, "MBUTTON_UP");
		break;
	case CV_EVENT_LBUTTONDBLCLK:
		snprintf(str, 64, "(%d,%d) %s", x, y, "LBUTTON_DOUBLE_CLICK");
		break;
	case CV_EVENT_RBUTTONDBLCLK:
		snprintf(str, 64, "(%d,%d) %s", x, y, "RBUTTON_DOUBLE_CLICK");
		break;
	case CV_EVENT_MBUTTONDBLCLK:
		snprintf(str, 64, "(%d,%d) %s", x, y, "MBUTTON_DOUBLE_CLICK");
		break;
	
	}
	//マウスボタン,修飾キーを取得
	if (flags & CV_EVENT_FLAG_LBUTTON)
		strncat(str, " + LBUTTON", 64);
	if (flags & CV_EVENT_FLAG_RBUTTON)
		strncat(str, " + RBUTTON", 64);
	if (flags & CV_EVENT_FLAG_MBUTTON)
		strncat(str, " + MBUTTON", 64);



	//マウス座標,イベント,修飾キーなどを画像dst_img1に描画,表示
	//表示画像がずれるのを防ぐために、cvPutText()の表示位置を固定
	//フォントが重なることを防ぐために、毎回元画像src_imgの状態に戻す
	cvPutText(dst_img1 =cvCloneImage(src_img), str, cvPoint(5, src_img->height-10), &font, CV_RGB(0, 200, 100));
	cvShowImage("src", dst_img1);
	cvCopy(src_img, dst_img1);

}
Пример #22
0
/**
 * Compose and display all registered sub-images in one window.
 *
 * When the cached composite is stale (!update), each cell image is
 * converted to 3 channels if needed, copied into its grid position on a
 * global canvas, topped with its title (white text on a small black
 * patch), and the canvas is resized to the requested window size.  The
 * (possibly cached) composite is then shown in this display's window.
 * Sets this->update to true on return.
 */
void DiVADisplay::windowVisible() {
	// NOTE(review): baseSize, k and w are never read after
	// initialization -- apparently leftovers.
	unsigned int baseSize = MINGAP;
	unsigned int i = 0, j = 0, k = 0, w = 0;
	unsigned int limitX = 0, sizeText = 0;
	unsigned int x = 0, y = 0;
	unsigned int gapText = 0, sizefont = 0;
	IplImage *tmp = NULL, *tmp2 = NULL, *global = NULL;
	DiVAImage* newImage = NULL, *temporal = NULL;
	DiVASCALAR black = DiVAScalarAll(1);  // fill color for the title patch
	CvSize size;

	// Rebuild the composite only when it is marked stale.
	if (!update) {
		this->CreateImage();
		global = OpenCVConverter::getIplImage(this->getDiVAImage());

		// Check space color
		for(i = 1; i <= this->getRows(); i++) {
			for(j = 1; j <= this->getColumns(); j++) {
				if (this->getDiVAImageN(i,j) != NULL) {
					// Promote grayscale cells to 3 channels so they can
					// be pasted into the RGB canvas.
					if (this->getDiVAImageN(i,j)->getNChannels() != 3) {
						temporal = new DiVAImage(this->getImWidthInput(),this->getImHeightInput(),3 ,this->getDiVAImageN(i,j)->getDepth()); 
						tmp2 = OpenCVConverter::getIplImage(temporal);
						delete temporal;
						tmp = OpenCVConverter::getIplImage(this->getDiVAImageN(i,j));
						cvCvtColor(tmp,tmp2,CV_GRAY2RGB);
						cvReleaseImage(&tmp);
					}
					else 
						tmp2 = OpenCVConverter::getIplImage(this->getDiVAImageN(i,j));
					// Paste the cell (initial image = tmp2) at its grid
					// position on the canvas.
					x = this->getPositionxN(i,j);
					y = this->getPositionyN(i,j);
					cvSetImageROI(global, cvRect(x,y,this->getImWidthInput(),this->getImHeightInput()));
					cvCopy(tmp2,global);
					cvReleaseImage(&tmp2);
					cvResetImageROI(global);
					// Include Title
					if (this->getTitleN(i,j) != NULL) {
						limitX = this->getGlobalWidth();
						x = this->getPositionxN(i,j);
						y = this->getGlobalHeight() - (this->getImHeightInput() * (i-1));
						cvGetTextSize(this->getTitleN(i,j),this->getFont(),&size,NULL);
						// NOTE(review): sizeText is unsigned -- if the
						// title is wider than the cell this subtraction
						// wraps around and the > 0 test below is always
						// true.  Confirm intended behavior.
						sizeText = this->getImWidthInput() - size.width;
						if (sizeText > 0)
							x += (((int)(this->getImWidthInput()/2)) - ((int)(size.width/2)));  // center the title in the cell
	
						// Derive the font patch size and vertical offset
						// from the configured font scale.
						sizefont = MINGAP;
						sizeText = (int)(sizefont * this->getSizeFont());
						gapText = sizeText + (int)(2*(sizeText/8));
						y -= (gapText * (i));
						y += (int)(sizeText/8);

						// Paint a small black square as text background,
						// then draw the title in white on top of it.
						temporal = new DiVAImage(sizeText,sizeText,3,8);
						temporal->setPixels(black);
						tmp = OpenCVConverter::getIplImage(temporal);
						delete temporal;
						cvSetImageROI(global, cvRect(x,y,sizeText,sizeText));
						cvCopy(tmp,global);
						cvResetImageROI(global);
						cvReleaseImage(&tmp);
						cvPutText(global ,this->getTitleN(i,j),cvPoint(x,y),this->getFont(),cvScalar(256,256,256,0));
					}
				}
			}
		}
	
		// Resize the composite (initial image = global) to either the
		// requested window size or the native global size.
		if (this->sizeWindow)
			temporal = new DiVAImage(this->wWidth,this->wHeight,3,this->getDiVAImage()->getDepth());
		else 
			temporal = new DiVAImage(this->getGlobalWidth(),this->getGlobalHeight(),3 ,this->getDiVAImage()->getDepth());
		tmp2 = OpenCVConverter::getIplImage(temporal);
		delete temporal;
		cvResize(global,tmp2);
		newImage = OpenCVConverter::getDiVAImage(tmp2);
		//newImage->setVAlign(1);
		newImage->setVAlign(0);
		this->setDiVAImage(newImage);
		delete newImage;
		cvReleaseImage(&global);
		cvReleaseImage(&tmp2);
	}
	// Check and Visualize: make sure the window exists, then show the
	// cached composite.
	if (this->getActive() == false)
		this->onWindows();
	if (this->getDiVAImage() != NULL) {
		tmp = OpenCVConverter::getIplImage(this->image);
		if (tmp != NULL) {
			cvShowImage(this->getWindowName(),tmp);
			cvWaitKey(1);
			cvReleaseImage(&tmp);
		}
	}
	this->update = true;
	return;
}
// Update Motion History Image: calculate motion features and orientation.
//
// image             - current frame (BGR); converted to grayscale into the
//                     ring-buffer slot image_buffer[last_index].
// destination_image - optional visualization target; when non-NULL the scaled
//                     MHI is rendered into its blue plane and orientation
//                     markers are drawn onto it.
// motionInfo        - out-parameter receiving the global angle, per-segment
//                     areas/angles/movement flags and overall totals.
//
// Relies on file-scope state: image_buffer[N] ring buffer, last_index, mhi,
// silhouette, orientation, orientation_mask, segment_mask and storage.
void motionDetection(IplImage* image, IplImage* destination_image, MotionInfo* motionInfo)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize image_size = cvSize(image->width, image->height); // get current frame image_size
    int previous_frame_index = last_index, current_frame_index;
    
    initialize_images(image_size);
    
    cvCvtColor(image, image_buffer[last_index], CV_BGR2GRAY); // convert frame to grayscale
    
    current_frame_index = (last_index + 1) % N; // index of (last_index - (N-1))th frame
    last_index = current_frame_index;
    
    // Reuse the oldest ring-buffer slot as the silhouette scratch image.
    silhouette = image_buffer[current_frame_index];
    
    cvAbsDiff(image_buffer[previous_frame_index], image_buffer[current_frame_index], silhouette); // Get difference between frames
    cvThreshold(silhouette, silhouette, DIFFERENCE_THRESHOLD, 1, CV_THRESH_BINARY); // Add threshold
    
    cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION); // Update MHI
    
    // Scale MHI to an 8U image (most recent motion is brightest).
    cvCvtScale(mhi, orientation_mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION);
    
    if (destination_image) {
      cvZero(destination_image);
      cvCvtPlaneToPix(orientation_mask, 0, 0, 0, destination_image); // MHI -> blue plane
    }
    
    // Calculate motion gradient orientation and valid orientation mask
    // (orientation_mask is overwritten here; its display copy was made above).
    cvCalcMotionGradient(mhi, orientation_mask, orientation, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    
    // Lazily create (or recycle) the storage used by cvSegmentMotion.
    if(!storage)
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    CvSeq* motion_feature_sequence = cvSegmentMotion(mhi, segment_mask, storage, timestamp, MAX_TIME_DELTA);
    
    int SEGMENT_WIDTH = image_size.width / MAX_SEGMENTS_X;
    int SEGMENT_HEIGHT = image_size.height / MAX_SEGMENTS_Y;
    
    // Global motion over the whole frame.
    CvRect global_motion_segment = cvRect(0, 0, image_size.width, image_size.height);
    motionInfo->global_angle = calculate_orientation(global_motion_segment, silhouette);
    
    if (destination_image)
      draw_orientation(destination_image, &global_motion_segment, motionInfo->global_angle, 100, CV_RGB(0, 255, 0), true);
    
    long area = 0;
    long totalArea = 0;
    int totalMovingSegments = 0;
    bool hasValidMovement = false;
    CvRect segmentRect;
    
    // Segmented motion: walk the MAX_SEGMENTS_X x MAX_SEGMENTS_Y grid.
    for(int x = 0; x < MAX_SEGMENTS_X; x++)
    {
      for(int y = 0; y < MAX_SEGMENTS_Y; y++)
      {
        segmentRect = cvRect(x * SEGMENT_WIDTH, y * SEGMENT_HEIGHT, SEGMENT_WIDTH, SEGMENT_HEIGHT);
        area = calculate_motion(&segmentRect, motion_feature_sequence);
        hasValidMovement = (area > MIN_MOTION_FEATURE_AREA);
        
        motionInfo->segment_motion_areas[x][y] = area;
        motionInfo->segment_movements[x][y] = hasValidMovement;
        motionInfo->segment_angles[x][y] = calculate_orientation(segmentRect, silhouette);
        
        totalArea += area;
        totalMovingSegments += hasValidMovement; // reuse the flag instead of re-testing
        
        if (hasValidMovement && destination_image)
          draw_orientation(destination_image, &segmentRect, motionInfo->segment_angles[x][y], 20, CV_RGB(255, 0, 0), true);
      }
    }
    motionInfo->total_motion_area = totalArea;
    motionInfo->total_segments_with_movements = totalMovingSegments;
    motionInfo->SEGMENTS_X = MAX_SEGMENTS_X;
    motionInfo->SEGMENTS_Y = MAX_SEGMENTS_Y;
    
    // Bug fix: totalArea is long, so "%i" was undefined behavior on LP64 —
    // use "%ld" to match the argument type.
    printf("%ld, %f\n", totalArea, (float)totalArea / (float)(image_size.width*image_size.height));
}
/* In-place video transform that tracks a pointer by color histogram.
 *
 * Calibration state machine (driven by pointerdetector->iteration/->state):
 *   - iteration 5: capture the reference histogram from corner rect 1.
 *   - CAPTURING_REF_HIST: when rect 1's histogram stays similar to the
 *     reference (Bhattacharyya >= COMPARE_THRESH_SECOND_HIST) for more than
 *     20 frames, rect 1 turns green and capture of a second histogram begins.
 *   - CAPTURING_SECOND_HIST: when rect 2's histogram matches rect 1's
 *     (distance < COMPARE_THRESH_2_RECT) for more than 15 frames, rect 2's
 *     histogram becomes the tracking model (state BOTH_HIST_SIMILAR).
 *   - After FRAMES_TO_RESET iterations (unless still capturing the second
 *     histogram) calibration restarts.
 *
 * Once a model exists, numOfRegions randomly-offset candidate windows are
 * scored against it and the best match (lowest Bhattacharyya distance below
 * COMPARE_THRESH_HIST_REF) becomes the new tracking rectangle; the reported
 * pointer position is smoothed by averaging with the previous one when the
 * jump is small (< 55 px on either axis).
 *
 * Always returns GST_FLOW_OK.
 */
static GstFlowReturn
kms_pointer_detector_transform_frame_ip (GstVideoFilter * filter,
    GstVideoFrame * frame)
{
  KmsPointerDetector *pointerdetector = KMS_POINTER_DETECTOR (filter);
  GstMapInfo info;
  double min_Bhattacharyya = 1.0, bhattacharyya = 1, bhattacharyya2 =
      1, bhattacharyya3 = 1;
  int i = 0;

  pointerdetector->frameSize = cvSize (frame->info.width, frame->info.height);
  kms_pointer_detector_initialize_images (pointerdetector, frame);

  /* NOTE(review): buffer is mapped READ, yet cvRectangle/cvCircle below draw
   * into it in place -- confirm whether GST_MAP_READWRITE was intended. */
  gst_buffer_map (frame->buffer, &info, GST_MAP_READ);
  /* Wrap the mapped frame data in the pre-allocated IplImage header (no copy). */
  pointerdetector->cvImage->imageData = (char *) info.data;

  /* Periodic re-calibration: restart reference-histogram capture. */
  if ((pointerdetector->iteration > FRAMES_TO_RESET)
      && (pointerdetector->state != CAPTURING_SECOND_HIST)) {
    get_histogram (pointerdetector->cvImage, pointerdetector->upCornerRect1,
        pointerdetector->trackinRectSize, pointerdetector->histSetUpRef);
    pointerdetector->histRefCapturesCounter = 0;
    pointerdetector->secondHistCapturesCounter = 0;
    pointerdetector->state = CAPTURING_REF_HIST;
    pointerdetector->colorRect1 = WHITE;
    pointerdetector->colorRect2 = WHITE;
    /* Jump past the warm-up window so processing resumes next frame. */
    pointerdetector->iteration = 6;
  }
  /* Frame 5: take the initial reference histogram and skip processing. */
  if (pointerdetector->iteration == 5) {
    get_histogram (pointerdetector->cvImage, pointerdetector->upCornerRect1,
        pointerdetector->trackinRectSize, pointerdetector->histSetUpRef);
    pointerdetector->state = CAPTURING_REF_HIST;
    goto end;
  }

  /* Warm-up: ignore the first few frames entirely. */
  if (pointerdetector->iteration < 6)
    goto end;

  /* Compare rect 1's current histogram against the stored reference. */
  get_histogram (pointerdetector->cvImage, pointerdetector->upCornerRect1,
      pointerdetector->trackinRectSize, pointerdetector->histSetUp1);
  bhattacharyya2 =
      cvCompareHist (pointerdetector->histSetUp1,
      pointerdetector->histSetUpRef, CV_COMP_BHATTACHARYYA);
  if ((bhattacharyya2 >= COMPARE_THRESH_SECOND_HIST)
      && (pointerdetector->state == CAPTURING_REF_HIST)) {
    pointerdetector->histRefCapturesCounter++;
    if (pointerdetector->histRefCapturesCounter > 20) {
      /* Rect 1 histogram is stable: advance to second-histogram capture. */
      pointerdetector->histRefCapturesCounter = 0;
      pointerdetector->colorRect1 = CV_RGB (0, 255, 0);
      pointerdetector->state = CAPTURING_SECOND_HIST;
    }
  }
  if (pointerdetector->state == CAPTURING_SECOND_HIST) {
    get_histogram (pointerdetector->cvImage, pointerdetector->upCornerRect2,
        pointerdetector->trackinRectSize, pointerdetector->histSetUp2);
    bhattacharyya3 =
        cvCompareHist (pointerdetector->histSetUp1,
        pointerdetector->histSetUp2, CV_COMP_BHATTACHARYYA);
    if (bhattacharyya3 < COMPARE_THRESH_2_RECT) {
      pointerdetector->secondHistCapturesCounter++;
      if (pointerdetector->secondHistCapturesCounter > 15) {
        /* Both histograms agree: adopt rect 2's histogram as the model. */
        pointerdetector->secondHistCapturesCounter = 0;
        pointerdetector->state = BOTH_HIST_SIMILAR;
        pointerdetector->colorRect2 = CV_RGB (0, 255, 0);
        cvCopyHist (pointerdetector->histSetUp2, &pointerdetector->histModel);
        pointerdetector->upCornerFinalRect.x = 10;
        pointerdetector->upCornerFinalRect.y = 10;
        pointerdetector->histRefCapturesCounter = 0;
        pointerdetector->secondHistCapturesCounter = 0;
      }
    }
  }
  /* Random-search tracking: score candidate windows around the last known
   * position; keep the one with the smallest Bhattacharyya distance. */
  for (i = 0; i < pointerdetector->numOfRegions; i++) {
    int horizOffset =
        pointerdetector->upCornerFinalRect.x +
        pointerdetector->windowScale * (rand () %
        pointerdetector->trackinRectSize.width -
        pointerdetector->trackinRectSize.width / 2);
    int vertOffset =
        pointerdetector->upCornerFinalRect.y +
        pointerdetector->windowScale * (rand () %
        pointerdetector->trackinRectSize.height -
        pointerdetector->trackinRectSize.height / 2);
    pointerdetector->trackingPoint1Aux.x = horizOffset;
    pointerdetector->trackingPoint1Aux.y = vertOffset;
    pointerdetector->trackingPoint2Aux.x =
        horizOffset + pointerdetector->trackinRectSize.width;
    pointerdetector->trackingPoint2Aux.y =
        vertOffset + pointerdetector->trackinRectSize.height;
    /* Only evaluate candidates fully inside the frame. */
    if ((horizOffset > 0)
        && (pointerdetector->trackingPoint2Aux.x <
            pointerdetector->cvImage->width)
        && (vertOffset > 0)
        && (pointerdetector->trackingPoint2Aux.y <
            pointerdetector->cvImage->height)) {
      if (pointerdetector->show_debug_info)
        cvRectangle (pointerdetector->cvImage,
            pointerdetector->trackingPoint1Aux,
            pointerdetector->trackingPoint2Aux, CV_RGB (0, 255, 0), 1, 8, 0);
      cvSetImageROI (pointerdetector->cvImage,
          cvRect (pointerdetector->trackingPoint1Aux.x,
              pointerdetector->trackingPoint1Aux.y,
              pointerdetector->trackinRectSize.width,
              pointerdetector->trackinRectSize.height));
      cvCopy (pointerdetector->cvImage, pointerdetector->cvImageAux1, 0);
      cvResetImageROI (pointerdetector->cvImage);
      calc_histogram (pointerdetector->cvImageAux1,
          pointerdetector->histCompare);
      bhattacharyya =
          cvCompareHist (pointerdetector->histModel,
          pointerdetector->histCompare, CV_COMP_BHATTACHARYYA);
      if ((bhattacharyya < min_Bhattacharyya)
          && (bhattacharyya < COMPARE_THRESH_HIST_REF)) {
        min_Bhattacharyya = bhattacharyya;
        pointerdetector->trackingPoint1 = pointerdetector->trackingPoint1Aux;
        pointerdetector->trackingPoint2 = pointerdetector->trackingPoint2Aux;
      }
    }
  }
  /* Draw the two calibration rectangles with their current status colors. */
  cvRectangle (pointerdetector->cvImage, pointerdetector->upCornerRect1,
      pointerdetector->downCornerRect1, pointerdetector->colorRect1, 1, 8, 0);
  cvRectangle (pointerdetector->cvImage, pointerdetector->upCornerRect2,
      pointerdetector->downCornerRect2, pointerdetector->colorRect2, 1, 8, 0);
  /* Good match: search narrowly next frame; otherwise widen the search. */
  if (min_Bhattacharyya < 0.95) {
    pointerdetector->windowScale = pointerdetector->windowScaleRef;
  } else {
    pointerdetector->windowScale = pointerdetector->cvImage->width / 8;
  }
  CvPoint finalPointerPositionAux;

  /* Pointer position = center of the current tracking rectangle. */
  finalPointerPositionAux.x = pointerdetector->upCornerFinalRect.x +
      pointerdetector->trackinRectSize.width / 2;
  finalPointerPositionAux.y = pointerdetector->upCornerFinalRect.y +
      pointerdetector->trackinRectSize.height / 2;
  /* Smooth small movements by averaging with the previous position. */
  if (abs (pointerdetector->finalPointerPosition.x -
          finalPointerPositionAux.x) < 55 ||
      abs (pointerdetector->finalPointerPosition.y -
          finalPointerPositionAux.y) < 55) {
    finalPointerPositionAux.x =
        (finalPointerPositionAux.x +
        pointerdetector->finalPointerPosition.x) / 2;
    finalPointerPositionAux.y =
        (finalPointerPositionAux.y +
        pointerdetector->finalPointerPosition.y) / 2;
  }
  pointerdetector->upCornerFinalRect = pointerdetector->trackingPoint1;
  pointerdetector->downCornerFinalRect = pointerdetector->trackingPoint2;

  pointerdetector->finalPointerPosition.x = finalPointerPositionAux.x;
  pointerdetector->finalPointerPosition.y = finalPointerPositionAux.y;

  cvCircle (pointerdetector->cvImage, pointerdetector->finalPointerPosition,
      10.0, WHITE, -1, 8, 0);

  kms_pointer_detector_check_pointer_position (pointerdetector);

end:
  pointerdetector->iteration++;
  gst_buffer_unmap (frame->buffer, &info);
  return GST_FLOW_OK;
}
Пример #25
0
// Dump a particle-cloud visualization of every well-observed map point
// (state > 2) to "disp<iter>.txt" and, when framecopy is non-NULL, draw the
// projected particles onto it in red.
//
// framecopy - optional image to annotate; may be NULL.
//
// Each qualifying point gets 40 random perturbations drawn through its 6x6
// covariance block; the perturbed inverse-depth parametrization is converted
// to a 3-D position and written to the file (x y z point-index per line).
void CDataOut::Disp_out(IplImage *framecopy)
{
  char ndisp[100];
  sprintf(ndisp,(resdir+"disp%d.txt").c_str(),iter++);
  DispFile.open(ndisp);
  int ii=0;
  // Header only: its data pointer is borrowed from the covariance matrix
  // via cvGetSubRect, so cvReleaseMat frees just the header below.
  CvMat* temp=cvCreateMatHeader(6,6,CV_32FC1);
  CvMat* vect=cvCreateMat(6,1,CV_32FC1);   // random perturbation (input)
  CvMat* res6=cvCreateMat(6,1,CV_32FC1);   // covariance-shaped perturbation
  CvMat* vect2=cvCreateMat(1,6,CV_32FC1);  // perturbed state / 3-D position
  CvMat* proj=cvCreateMat(4,2,CV_32FC1);   // image projection of the particle
  CvMat* m=cvCreateMat(3,1,CV_32FC1);      // unit bearing vector

  for ( list<CElempunto*>::iterator It=pEstimator->pMap->bbdd.begin();
        It != pEstimator->pMap->bbdd.end(); It++ )
    {
      if((*It)->state>2)
        {
          // Unit bearing vector from the point's spherical angles.
          cvmSet(m,0,0,cos((*It)->theta)*sin((*It)->phi));
          cvmSet(m,1,0,-sin((*It)->theta));
          cvmSet(m,2,0,cos((*It)->theta)*cos((*It)->phi));
          cvNormalize( m, m);

          CvMat *pCovMat = pEstimator->getCovMat();

          // Each point occupies a 6x6 block starting at offset 12+6*ii.
          if (12+ii*6< pCovMat->width && 12+ii*6< pCovMat->height)
            {
              cvGetSubRect( pCovMat,temp,cvRect(12+ii*6,12+ii*6,6,6) );

              // Scatter 40 random particles through the covariance block.
              for (int part=0; part<40; part++){
                cvmSet(vect,0,0,randomVector(-.005,.005));
                cvmSet(vect,1,0,randomVector(-.005,.005));
                cvmSet(vect,2,0,randomVector(-.005,.005));
                cvmSet(vect,3,0,randomVector(-.005,0.005));
                cvmSet(vect,4,0,randomVector(-.005,0.005));
                cvmSet(vect,5,0,randomVector(-1,1));

                cvMatMul(temp,vect,res6);

                // Perturbed bearing, then 3-D position via inverse depth rho.
                cvmSet(m,0,0,cos(cvmGet(res6,3,0)+(*It)->theta)*sin(cvmGet(res6,4,0)+(*It)->phi));
                cvmSet(m,1,0,-sin(cvmGet(res6,3,0)+(*It)->theta));
                cvmSet(m,2,0,cos(cvmGet(res6,3,0)+(*It)->theta)*cos(cvmGet(res6,4,0)+(*It)->phi));
                cvNormalize( m, m);
                cvmSet(vect2,0,0,((cvmGet(res6,0,0)+(*It)->wx)+cvmGet(m,0,0)/(cvmGet(res6,5,0)+(*It)->rho)));
                cvmSet(vect2,0,1,((cvmGet(res6,1,0)+(*It)->wy)+cvmGet(m,1,0)/(cvmGet(res6,5,0)+(*It)->rho)));
                cvmSet(vect2,0,2,((cvmGet(res6,2,0)+(*It)->wz)+cvmGet(m,2,0)/(cvmGet(res6,5,0)+(*It)->rho)));

                // One particle per line: x y z point-index.
                DispFile<<cvmGet(vect2,0,0)<<" ";
                DispFile<<cvmGet(vect2,0,1)<<" ";
                DispFile<<cvmGet(vect2,0,2)<<" ";
                DispFile<<ii<<endl;

                // Re-pack the full perturbed state for projection.
                cvmSet(vect2,0,0,cvmGet(res6,0,0)+(*It)->wx);
                cvmSet(vect2,0,1,cvmGet(res6,1,0)+(*It)->wy);
                cvmSet(vect2,0,2,cvmGet(res6,2,0)+(*It)->wz);
                cvmSet(vect2,0,3,cvmGet(res6,3,0)+(*It)->theta);
                cvmSet(vect2,0,4,cvmGet(res6,4,0)+(*It)->phi);
                cvmSet(vect2,0,5,cvmGet(res6,5,0)+(*It)->rho);

                pModelCam->cvProject_1_pto(vect2,proj,NULL,NULL,NULL);
                if (framecopy != NULL){
                  cvCircle (framecopy,cvPoint((int)cvmGet(proj,0,0),(int)cvmGet(proj,0,1)),1,cvScalar(0,0,255),1 );
                }
              }
            }
        }
      ii++;
    }
    // cvReleaseMat is the documented release call for headers too; it does
    // not free the borrowed sub-rect data (cvReleaseMatHeader is not part
    // of the OpenCV C API).
    cvReleaseMat(&temp);
    cvReleaseMat(&vect);
    cvReleaseMat(&vect2);
    cvReleaseMat(&res6);
    cvReleaseMat(&proj);
    cvReleaseMat(&m);   // bug fix: m was leaked in the original
    DispFile.close();
}
Пример #26
0
 int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  int_serial();
  unsigned char sendBuf;
   CvCapture* capture = cvCaptureFromCAM( -1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(100, 20, 40),cvScalar(140, 120, 100),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
   if (seq==0) {
     threshframebot=cvCloneImage(threshframe);
     cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;//}
//////////////////////////////////////////////////////////////////////////////////////////
//    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;// }
//////////////////////////////////////////////////////////////////////////////////////
//   if(seq==seqdiv+1) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    sendchar(253);sendchar(255);sendchar(255);
    sendchar(254);sendchar(b_cir_center.x);sendchar(b_cir_center.y);
    printf("top:(255, 255);  bottom: (%3d, %3d)\n",b_cir_center.x,b_cir_center.y);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
    sendchar(253);sendchar(t_cir_center.x);sendchar(t_cir_center.y);
    sendchar(254);sendchar(255); sendchar(255);
    printf("top:(%3d, %3d);  bottom: (255, 255)\n",t_cir_center.x,t_cir_center.y);

    }

     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("top:(%3d, %3d);  bottom: (%3d, %3d)\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     sendchar(253);sendchar(t_cir_center.x); sendchar(t_cir_center.y);
     sendchar(254);sendchar(b_cir_center.x);sendchar(b_cir_center.y);
     }
		
    }
    seq++;
    seq=seq%(seqdiv+1);
     cvShowImage( "mywindow", frame); // show output image
//     cvShowImage( "bot", threshframebot);
//     cvShowImage( "top", threshframetop);
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27  ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   //v4l.flush();
   cvDestroyWindow( "mywindow" );
   
return 0;
 }
Пример #27
0
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	//bridge that will transform the message (image) from ROS code back to "image" code
  sensor_msgs::CvBridge bridge;
  
  //publish data (obstacle waypoints) back to the boat
  ros::NodeHandle n;
  //std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message
  //std_msgs::Float32 yWaypoint_msg;         // Y coordinate obstacle message
  //publish the waypoint data             
  //ros::Publisher waypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //ros::Publisher Ywaypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //std::stringstream ss;
  
  /***********************************************************************/
  //live image coming streamed straight from the boat's camera
  IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
  IplImage* backUpImage = bridge.imgMsgToCv(msg, "bgr8");
  boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
  //Crop the image to the ROI
  cvSetImageROI(boatFront, cvRect(0,0,boatFront->height/0.5,boatFront->width/1.83));
  int X = boatFront->height;
  int Y = boatFront->width;
  /***********************************************************************/
  //boat's edge distance from the camera. This is used for visual calibration
  //to know the distance from the boat to the nearest obstacles.
  //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side
  //and 15 inches (0.381 m).
  //float boatFrontDistance = 0.381;    //distance in meters
  //float boatSideDistance = 0.5334;    //distance in meters
  
  // These variables tell the distance from the center bottom of the image
  // (the camera) to the square surrounding a the obstacle
  float xObstacleDistance = 0.0;
  float yObstacleDistance = 0.0;
  float obstacleDistance = 0.0;
  
  int pixelsNumber = 6;  //number of pixels for an n x n matrix and # of neighbors
  const int arraySize = pixelsNumber;
  const int threeArraySize = pixelsNumber;
  //if n gets changed, then the algorithm might have to be
  //recalibrated. Try to keep it constant
  //these variables are used for the k nearest neighbors
  //int accuracy;
  //reponses for each of the classifications
  float responseWaterH, responseWaterS, responseWaterV; 
  float responseGroundH, responseGroundS, responseGroundV;
  //float responseSkyH, responseSkyS, responseSkyV;
  float averageHue = 0.0;
  float averageSat = 0.0;
  float averageVal = 0.0;
  CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  for (int i = 0; i < pixelsNumber/2; i++)
  {
      cvmSet(trainClasses, i,0,1);
      cvmSet(trainClasses2, i,0,1);
  }
  for (int i = pixelsNumber/2; i < pixelsNumber; i++)
  {
      cvmSet(trainClasses, i,0,2);
      cvmSet(trainClasses2, i,0,2);
  }
  //for (int i =0; i<pixelsNumber;i++)
  //{
    //   cout << cvmGet(trainClasses,i,0);
      // cout << cvmGet(trainClasses2,i,0);   
  //}
  //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
  //used with the classifier 
  CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //Distance
  //CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //these variables are use to traverse the picture by blocks of n x n pixels at
  //a time. 
  //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the
  //right way, of course)
  //x and y are the dimensions of the local patch of pixels
  int x = (boatFront->height)/2.5 + pixelsNumber + 99; 
  int y = pixelsNumber-1; 
  int ix = 0; 
  int iy = 0; 
  int skyX = 0; 
  int skyY = 0;
  //M controls the x axis (up and down); N controls the y axis (left and
  //right)
  int Mw = -550; 
  int Nw = 1300; 
  int Mg = -350; 
  int Ng = 700;
  int row1 = 0;
  int column1 = 0;
  int row2 = 0;
  int column2 = 0;
  //ground sample
  CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //water sample
  CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //n x n sample patch taken from the picture
  CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* resampleHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //sky training sample
  CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //initialize each matrix element to zero for ease of use
  cvZero(groundTrainingHue);
  cvZero(groundTrainingSat);
  cvZero(groundTrainingVal);
  cvZero(waterTrainingHue);
  cvZero(waterTrainingSat);
  cvZero(waterTrainingVal);
  cvZero(sampleHue);
  cvZero(sampleSat);
  cvZero(sampleVal);
  cvZero(resampleHue);
  cvZero(resampleSat);
  cvZero(resampleVal);
  cvZero(skyTrainingHue);
  cvZero(skyTrainingSat);
  cvZero(skyTrainingVal);    
  //Stores the votes for each channel (whether it belongs to water or not
  //1 is part of water, 0 not part of water
  //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water
  int votesSum = 0;
  int comparator[3];        //used when only three votes are needed
  //int comparatorTwo [3][3];    //used when six votes are needed
  //initial sum of votes is zero
  //Error if initialize both matrices inside a single for loop. Dont know why
  //for(int i = 0; i < 3; i++)
  //{   
      //comparator[i] = 0;
    //  for(int j = 0; j < 3; j++)
     // {
       //   comparatorTwo[i][j] = 0;
     // }
  //}
  for(int i = 0; i < 3; i++)
  {   
      comparator[i] = 0;
  }
  
  /***********************************************************************/
  //Convert from RGB to HSV to control the brightness of the objects.
  //work with reflexion
  /*Sky recognition. Might be useful for detecting reflexion on the water. If
    the sky is detected, and the reflection has the same characteristics of
    something below the horizon, that "something" might be water. Assume sky
    wont go below the horizon
  */
  //convert from RGB to HSV
  cvCvtColor(boatFront, boatFront, CV_BGR2HSV);
  cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV);
  HsvImage I(boatFront);
  HsvImage IBackUp(backUpImage);
  //Sky detection
  for (int i=0; i<boatFront->height/3;i++)
  {
      for (int j=0; j<boatFront->width;j++)
      {
      //if something is bright enough, consider it sky and store the
      //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255
          if (((I[i][j].v >= 180) && (I[i][j].s <= 16)))
              // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144))))
          {
              //The HSV values vary between 0 and 1
              cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h);
              cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s);
              cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v);
              I[i][j].h = 0.3*180;       //H (color)
              I[i][j].s = 0.3*180;          //S (color intensity)
              I[i][j].v = 0.6*180;          //V (brightness)
              if (skyY == pixelsNumber-1)
              {
                 if (skyX == pixelsNumber-1)
                   skyX = 1;
                 else
                   skyX = skyX + 1;
                 skyY = 1;
              }
              else
                skyY = skyY + 1;
         }   
      }
  }
  
  /***********************************************************************/
  //offline input pictures. Samples of water properties are taken from these 
  //pictures to get a range of values for H, S, V that will be stored into a 
  //pre-defined classifier
  IplImage* imageSample1 = cvLoadImage("bigObstacle.jpg");
  cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83));
  cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV);
  HsvImage I1(imageSample1);
  IplImage* imageSample2 = cvLoadImage("bigObstacle2.jpg");
  cvSetImageROI(imageSample2, cvRect(0,0,imageSample2->height/0.5,imageSample2->width/1.83));
  cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV);
  HsvImage I2(imageSample2);
  IplImage* imageSample3 = cvLoadImage("bigObstacle3.jpg");
  cvSetImageROI(imageSample3, cvRect(0,0,imageSample3->height/0.5,imageSample3->width/1.83));
  cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV);
  HsvImage I3(imageSample3);
  IplImage* imageSample4 = cvLoadImage("river.jpg");
  cvSetImageROI(imageSample4, cvRect(0,0,imageSample4->height/0.5,imageSample4->width/1.83));
  cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV);
  HsvImage I4(imageSample4);
  IplImage* imageSample5 = cvLoadImage("river2.jpg");
  cvSetImageROI(imageSample5, cvRect(0,0,imageSample5->height/0.5,imageSample5->width/1.83));
  cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV);
  HsvImage I5(imageSample5);
  IplImage* imageSample6 = cvLoadImage("roundObstacle4.jpg");
  cvSetImageROI(imageSample6, cvRect(0,0,imageSample6->height/0.5,imageSample6->width/1.83));
  cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV);
  HsvImage I6(imageSample6);
  IplImage* imageSample7 = cvLoadImage("farm.jpg");
  cvSetImageROI(imageSample7, cvRect(0,0,imageSample7->height/0.5,imageSample7->width/1.83));
  cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV);
  HsvImage I7(imageSample7);
  IplImage* imageSample8 = cvLoadImage("bigObstacle4.jpg");
  cvSetImageROI(imageSample8, cvRect(0,0,imageSample8->height/0.5,imageSample8->width/1.83));
  cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV);
  HsvImage I8(imageSample8);
  IplImage* imageSample9 = cvLoadImage("roundObstacle6.jpg");
  cvSetImageROI(imageSample9, cvRect(0,0,imageSample9->height/0.5,imageSample9->width/1.83));
  cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV);
  HsvImage I9(imageSample9);
  IplImage* imageSample10 = cvLoadImage("roundObstacle.jpg");
  cvSetImageROI(imageSample10, cvRect(0,0,imageSample10->height/0.5,imageSample10->width/1.83));
  cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV);
  HsvImage I10(imageSample10);
  //grab water samples from each picture
  for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          row1 = ceil(X/1.2866)+ceil(X/5.237)+i+Mw;
          column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+Nw;
          averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + 
          I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h) / 10;
          averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + 
          I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s) / 10;
          averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + 
          I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v) / 10;
          //water patch sample (n X n matrix)
          cvmSet(waterTrainingHue,i,j,averageHue);
          cvmSet(waterTrainingSat,i,j,averageSat);
          cvmSet(waterTrainingVal,i,j,averageVal);  
           //patch is red (this is for me to know where the ground patch sample is)
          //I[row1][column1].h = 0;
          //I[row1][column1].s = 255;
          //I[row1][column1].v = 255;
      }
  }
  //order the water samples in ascending order on order to know a range
  cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING);
  cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING);
  cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING);
  // find the maximum and minimum values in the array to create a range
  int maxH = cvmGet(waterTrainingHue,0,0);
  int maxS = cvmGet(waterTrainingSat,0,0);
  int maxV = cvmGet(waterTrainingVal,0,0);
  int minH = cvmGet(waterTrainingHue,0,0);
  int minS = cvmGet(waterTrainingSat,0,0);
  int minV = cvmGet(waterTrainingVal,0,0);
  for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          if (cvmGet(waterTrainingHue,i,j) > maxH)
              maxH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) > maxS)
              maxS = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingVal,i,j) > maxV)
              maxV = cvmGet(waterTrainingVal,i,j);
          if (cvmGet(waterTrainingHue,i,j) < minH)
              minH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) < minS)
              minS = cvmGet(waterTrainingSat,i,j);
          if (cvmGet(waterTrainingVal,i,j) < minV)
              minV = cvmGet(waterTrainingVal,i,j);
      }
  }
	
	/***********************************************************************/
  //Grab a random patch of water below the horizon and compare every other
  //pixel against it
  //The results of the water detection depend on where in the picture the
  //training samples are located. Maybe adding more training samples will
  //help improve this?
  /*
  for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          row2 = ceil(X/4.7291)+ceil(X/8.3176)+i+Mg;
          column2 = ceil(Y/7.78378)+ceil(Y/16.54468)+j+Ng;
      //ground patch sample (n X n matrix)
      //Detecting the horizon in the picture might be an excellent visual aid to
      //choose where (above the horizon) you can take a ground training(1:3*n,1:n)g sample
      //from. The ground pixel sample can be at a constant distance from the
      //horizon
          cvmSet(groundTrainingHue,i,j,I[row2][column2].h);
          cvmSet(groundTrainingSat,i,j,I[row2][column2].s);
          cvmSet(groundTrainingVal,i,j,I[row2][column2].v);   
      //patch is red (this is for me to know where the ground patch sample is)
          I[row2][column2].h = 60; 
          I[row2][column2].s = 180;
          I[row2][column2].v = 90;
      }
  }
  //order the water samples in ascending order on order to know a range
  cvSort(groundTrainingHue, groundTrainingHue, CV_SORT_ASCENDING);
  cvSort(groundTrainingSat, groundTrainingSat, CV_SORT_ASCENDING);
  cvSort(groundTrainingVal, groundTrainingVal, CV_SORT_ASCENDING);
  */ 
  // Main loop. It traverses through the picture
  //skyX = 0; 
  //skyY = 0;
  while (x < boatFront->height/1.158)
  {
      //get a random sample taken from the picture. Must be determined whether
      //it is water or ground
      for (int i = 0; i<pixelsNumber;i++)
      {
          cvmSet(sampleHue,0,i,I[x][y].h);
          cvmSet(sampleSat,0,i,I[x][y].s);
          cvmSet(sampleVal,0,i,I[x][y].v);
      }
      //Find the shortest distance between a pixel and the neighbors from each of
      //the training samples (sort of inefficient, but might do the job...sometimes)
      //if (ix == pixelsNumber-1)
      //{
          //HSV for water sample
          // learn classifier
          //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber);
          //CvKNearest knnWaterHue(waterTrainingHue, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterSat(waterTrainingSat, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterVal(waterTrainingVal, trainClasses, 0, false, pixelsNumber);
          //HSV for ground sample
          //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber);
          //HSV for sky sample
          //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0)
          //{
            //  CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber);
          //}
          
          //scan nearest neighbors to each pixel
          //responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0);
          //responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0);
          //responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0);
          //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0);
          //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0);
          //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0);
          for (int i=0;i<pixelsNumber;i++)
          {
              for (int j=0;j<pixelsNumber;j++)
              {
                  if ((minH <= cvmGet(sampleHue,0,j)) || (maxH >= cvmGet(sampleHue,0,j)))
                      //mark water samples as green
                      comparator[0] = 1;
                  else
                      comparator[0] = 0;
      	          if (((minS <= cvmGet(sampleSat,0,j)) || (maxS <= cvmGet(sampleSat,0,j))))
                  //mark water samples as green
                      comparator[1] = 1;
                  else
                      comparator[1] = 0;
                  if ((minV <= cvmGet(sampleVal,0,j)) || (maxV <= cvmGet(sampleVal,0,j)))
                  //mark water samples as green
                      comparator[2] = 1;
                  else
                      comparator[2] = 0;
                    	//count votes
                  for (int i3=0; i3 < 3; i3++)
                      votesSum = votesSum + comparator[i3];
                  //sky detection 
                  if (votesSum > 1) //&& ((sampleSat[i][j] - sampleVal[i][j]) <= 0.1*180)
                  {
                  // classify pixel as water 
                      I[x-pixelsNumber+i][y-pixelsNumber+j].h = 0;
                      I[x-pixelsNumber+i][y-pixelsNumber+j].s = 255;
                      I[x-pixelsNumber+i][y-pixelsNumber+j].v = 255;
                  }
                  votesSum = 0;
              }
          }
          if (y < Y-1)
              y = y + pixelsNumber-1;
          if (y > Y-1)
              y = Y-1;
          else if (y == Y-1)
          {
              x = x + pixelsNumber-1;
              y = pixelsNumber-1;
          }
          //ix = 0;
  }
  //traverse through the image one more time, divide the image in grids of
  // 500x500 pixels, and see how many pixels of water are in each grid. If
  // most of the pixels are labeled water, then mark all the other pixels
  // as water as well
  for(int i = 0; i < 3; i++)
  {   
      comparator[i] = 0;
  }
  //int counter = 0;
  int xDivisor = 20;
  int yDivisor = 20;
  votesSum = 0;
  column1 = 0;
  row1 = 0;
  x = ceil(boatFront->height/2.5);
  obstacleDistance = x;
  y = 0;
  int counter = 0;
  while (x < boatFront->height/1.2)
  {
    //get a random sample taken from the picture. Must be determined whether
    //it is water or ground
    for (int i = 0; i < ceil(boatFront->height/xDivisor); i++)
    {
        for(int j = 0; j < ceil(boatFront->width/yDivisor); j++)
        {
            cvmSet(resampleHue,i,j,I[x+i][y+j].h);
            cvmSet(resampleSat,i,j,I[x+i][y+j].s);
            cvmSet(resampleVal,i,j,I[x+i][y+j].v);
            if(cvmGet(resampleHue,i,j)==0 && cvmGet(resampleSat,i,j)==255 && cvmGet(resampleVal,i,j)==255)
            {
                votesSum++;
            }
        }
    }
    if (votesSum > ((boatFront->height/xDivisor)*(boatFront->width/yDivisor)*(8.9/9)))
    {   
    // if bigger than 4/5 the total number of pixels in a square, then consider the entire thing as water  
    // We might need to use other smaller quantities (like 5/6 maybe?)
        for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
        {
            for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
            {
                row1 = x + i;
                if (row1 > X-1)
                    row1 = X-1;
                column1 = y+j;
                I[row1][column1].h = 0;
                I[row1][column1].s = 255;
                I[row1][column1].v = 255;
            }
        }
    }
    else
    {   
    // If not water, eliminate all red pixels and turn those pixels
    // back to the original color. These pixels shall, then, be marked
    // as obstacles
        for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
        {
            for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
            {
                row1 = x + i;
                if (row1 > X-1)
                    row1 = X-1;
                column1 = y+j;
                //the darker the color, the closer the object to the boat
                I[row1][column1].h = 128;    
                I[row1][column1].s = 255;   
                I[row1][column1].v = 255 - counter;
                //I[row1][column1].h = IBackUp[row1][column1].h;
                //I[row1][column1].s = IBackUp[row1][column1].s;
                //I[row1][column1].v = IBackUp[row1][column1].v;
                //counter = counter + 20;
            }
        }
        //The distance formula calculated by plotting points is given by:
  /***********  distance = (1.76e-11)*pow(pixels,3.99)  *****************/
  /***********  pixel = (513.9332077469)pow(distance,0.240675506  *****************/
        // Convert from pixel distance to normal distance in meters
        if(obstacleDistance > sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2)))
        {
            // x,y coordinates of the obstacle
            xObstacleDistance = (1.76e-11)*pow(((boatFront->height/xDivisor)+x)/2, 3.99) ;
            yObstacleDistance = (1.76e-11)*pow(((boatFront->width/yDivisor)+y)/2, 3.99);
            //xWaypoint_msg = xObstacleDistance;
            //yWaypoint_msg = yObstacleDistance;
            //publish position data
            //waypoint_info_pub.publish(xWaypoint_msg);
            //waypoint_info_pub.publish(yWaypoint_msg);
            ROS_INFO("Obstacle coordinates: X = %f meters, Y = %f meters", xObstacleDistance, yObstacleDistance);  
            obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2));
            ROS_INFO("Obstacle distance from: %f", obstacleDistance);
        }
        //cout << "Distance to Obstacle is: " << obstacleDistance << endl << endl;
    }
    y = y + boatFront->width/xDivisor;
    if (y > Y-1)
    {
        x = x + boatFront->height/yDivisor;
        y = 0;
        counter = counter + 30;
    }
    votesSum = 0;
  }
    
  cvCvtColor(boatFront, boatFront, CV_HSV2BGR);
  cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR);
  
  /**************************************************************************/
	try
  {
    cvStartWindowThread();
  	cvNamedWindow("Boat Front",0);            //0 to maintains sizes regardless of image size
  	cvResizeWindow("Boat Front",400,250);     // new width/heigh in pixels
    cvShowImage("Boat Front", boatFront);
    cvResetImageROI(boatFront);
    cvReleaseImage(&boatFront);
    cvResetImageROI(backUpImage);
    cvReleaseImage(&backUpImage);
    cvReleaseMat(&trainClasses);
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
  }
}
void ImageProcessing::getDisparity() {


    int windowSize = 9;
    int DSR = 20;
    //char *leftImgPath, *rightImgPath;
    //cout<<"Enter full path of Left image ";
    //cin>>leftImgPath;
    //cout<<"Enter full path of Left image ";
    //cin>>rightImgPath;
    IplImage *LeftinputImage = cvLoadImage("../outputs/raw/left-0.pgm", 0);
    IplImage *RightinputImage = cvLoadImage("../outputs/raw/right-0.pgm", 0);

    //    int width = LeftinputImage->width;
    //    int height = LeftinputImage->height;

    /****************8U to 32F**********************/
    IplImage *LeftinputImage32 = cvCreateImage(cvSize(LeftinputImage->width, LeftinputImage->height), 32, 1);
    //IPL_DEPTH_32F
    IplImage *RightinputImage32 = cvCreateImage(cvSize(LeftinputImage->width, LeftinputImage->height), 32, 1);
    cvConvertScale(LeftinputImage, LeftinputImage32, 1 / 255.);
    cvConvertScale(RightinputImage, RightinputImage32, 1 / 255.);

    int offset = floor((double) windowSize / 2);
    int height = LeftinputImage32->height;
    int width = LeftinputImage32->width;
    double *localNCC = new double[DSR];

    int x = 0, y = 0, d = 0, m = 0;
    int N = windowSize;

    IplImage *leftWinImg = cvCreateImage(cvSize(N, N), 32, 1);
    //mySubImage(LeftinputImage32,cvRect(0,0,N,N));
    IplImage *rightWinImg = cvCreateImage(cvSize(N, N), 32, 1);
    //mySubImage(RightinputImage32,cvRect(0,0,N,N));
    IplImage *disparity = cvCreateImage(cvSize(width, height), 8, 1);
    //or IPL_DEPTH_8U
    BwImage imgA(disparity);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            imgA[y][x] = 0;
        }
    }

    CvScalar s1;
    CvScalar s2;
    for (y = 0; y < height - N; y++) {
        //height-N
        for (x = 0; x < width - N; x++) {
            //width-N
            //getWindow(i,j,leftim,wl,N);
            cvSetImageROI(LeftinputImage32, cvRect(x, y, N, N));
            s1 = cvAvg(LeftinputImage32, NULL);
            cvSubS(LeftinputImage32, s1, leftWinImg, NULL); //zero-means
            cvNormalize(leftWinImg, leftWinImg, 1, 0, CV_L2, NULL);
            d = 0;

            //initialise localNCC
            for (m = 0; m < DSR; m++) {
                localNCC[m] = 0;
            }

            do {
                if (x - d >= 0) {

                    cvSetImageROI(RightinputImage32, cvRect(x - d, y, N, N));
                    s2 = cvAvg(RightinputImage32, NULL);
                    cvSubS(RightinputImage32, s2, rightWinImg, NULL); //zero-means
                    cvNormalize(rightWinImg, rightWinImg, 1, 0, CV_L2, NULL);
                } else {
                    break;
                }
                localNCC[d] = cvDotProduct(leftWinImg, rightWinImg);
                cvResetImageROI(RightinputImage32);
                d++;
            }            while (d <= DSR);

            //to find the best d and store
            imgA[y + offset][x + offset] = getMaxMin(localNCC, DSR, 1) *16;
            cvResetImageROI(LeftinputImage32);
        } //x
        if (y % 10 == 0)
            cout << "row=" << y << " of " << height << endl;
    } //y

    cvReleaseImage(&leftWinImg);
    cvReleaseImage(&rightWinImg);

    cvSaveImage("disparity.pgm", disparity);
    waitHere();
    //cv::imwrite("disparity.pgm",&disparity);
    cout << "Displaying Disparity image" << endl;
    // cvShowImage( "Disparity", disparity);
    //cv::waitKey(0);
    //return disparity;

}
Пример #29
0
    /// Runs the loaded Haar cascade over the current `image` and returns the
    /// sequence of detections (allocated inside `storage`, valid until the
    /// next cvClearMemStorage), or 0 when no cascade is loaded. Detection is
    /// done on a downscaled, histogram-equalized grayscale copy; when USE_ROI
    /// is defined, later calls search only a padded window around the last
    /// hit to save time.
    CvSeq* detect()
    {
        if (!cascade) return 0;   // no classifier loaded — nothing to do
        // A scale of 0 means "unset"; treat it as no scaling.
        double scale = this->scale == 0? 1.0 : this->scale;
        IplImage* gray = cvCreateImage(cvSize(width, height ), 8, 1);
        IplImage* small = cvCreateImage(cvSize(cvRound(width * scale), cvRound(height * scale)), 8, 1);
        // `smallest` appears to encode the minimum object size as a fraction
        // scaled by 1000 — TODO confirm against where it is set.
        int min = cvRound(smallest * 1000);            
        CvSeq* faces = 0;
        
        // use a region of interest to improve performance
        // This idea comes from the More than Technical blog:
        // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
        if ( roi.width > 0 && roi.height > 0)
        {
            cvSetImageROI(small, roi);
            // `roi` is stored in small-image coordinates; map it back onto
            // the full-size image and its gray copy by dividing by `scale`.
            CvRect scaled_roi = cvRect(roi.x / scale, roi.y / scale,
                                       roi.width / scale, roi.height / scale);
            cvSetImageROI(image, scaled_roi);
            cvSetImageROI(gray, scaled_roi);
        }
        
        // use an equalized grayscale to improve detection
        cvCvtColor(image, gray, CV_BGR2GRAY);
        // use a smaller image to improve performance
        cvResize(gray, small, CV_INTER_LINEAR);
        cvEqualizeHist(small, small);
        
        // detect with OpenCV
        cvClearMemStorage(storage);   // recycle storage from the previous frame
        // NOTE(review): `search_scale` and `neighbors` look like they are
        // stored pre-divided (by 10 and 100 respectively) — confirm against
        // their setters before changing these multipliers.
        faces = cvHaarDetectObjects(small, cascade, storage,
                                    search_scale * 10.0,
                                    cvRound(neighbors * 100),
                                    CV_HAAR_DO_CANNY_PRUNING,
                                    cvSize(min, min));
        
#ifdef USE_ROI
        if (!faces || faces->total == 0)
        {
            // clear the region of interest
            roi.width = roi.height = 0;
        }
        else if (faces && faces->total > 0)
        {
            // determine the region of interest from the first detected object
            // XXX: based on the first object only?
            CvRect* r = (CvRect*) cvGetSeqElem(faces, 0);
            
            if (roi.width > 0 && roi.height > 0)
            {
                // r is relative to the current ROI; shift to absolute coords.
                r->x += roi.x;
                r->y += roi.y;
            }
            // Pad the hit by PAD on every side, clamped to the image borders:
            // w/h go negative when the padded box overruns the right/bottom
            // edge, sw/sh when it overruns the left/top edge; the negative
            // amounts are subtracted from the stored size below.
            int startX = MAX(r->x - PAD, 0);
            int startY = MAX(r->y - PAD, 0);
            int w = small->width - startX - r->width - PAD * 2;
            int h = small->height - startY - r->height - PAD * 2;
            int sw = r->x - PAD, sh = r->y - PAD;
            
            // store the region of interest
            roi.x = startX;
            roi.y = startY,
            roi.width = r->width + PAD * 2 + ((w < 0) ? w : 0) + ((sw < 0) ? sw : 0);
            roi.height = r->height + PAD * 2 + ((h < 0) ? h : 0) + ((sh < 0) ? sh : 0); 
        }
#endif
        cvReleaseImage(&gray);
        cvReleaseImage(&small);
        cvResetImageROI(image);
        return faces;
    }
Пример #30
0
// FLTK timer callback: fetches the next frame from the global `capture`,
// handles user seeking via the `video_pos` slider, runs face detection on
// the frame, optionally records it to AVI, blits it into the video window,
// and updates the FPS readout. Re-arms itself with Fl::add_timeout while
// `started` is set.
static void get_next_frame(void*)
{
    // Set for one call after a seek so the slider echo from the capture
    // isn't mistaken for another user seek.
    static int repositioning = 0;
    IplImage* frame = 0;
    double new_pos = video_pos->value();
    
    // Slider moved (beyond float noise) and we're not mid-seek: seek the
    // capture to the requested relative position (0..1 of the file).
    if( (new_pos-old_pos >= 1e-10 || new_pos-old_pos <= -1e-10) && !repositioning)
    {
        video_window->redraw();
        cvSetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO, new_pos );
        // Read back the position actually reached (codecs seek to keyframes).
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        printf("Repositioning\n");
        repositioning = 1;
    }
    else
    {
        // Normal playback: reflect the capture's position on the slider.
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        video_pos->value(new_pos);
        repositioning = 0;
    }
    old_pos = new_pos;
    frame = cvQueryFrame( capture );

    // NULL frame from a file capture means end of stream — stop playback.
    if( frame == 0 && is_avi )
    {
        cb_Stop(0,0);
        return;
    }

    if( video_window && frame )
    {
        // Grow the top-level window if the frame doesn't fit the video area.
        if( video_window->w() < frame->width || video_window->h() < frame->height )
            root_window->size( (short)(frame->width + 40), (short)(frame->height + 150));

        CvRect rect = { video_window->x(), video_window->y(),
                        frame->width, frame->height };
        
        // (Re)allocate the working image when the frame outgrows it.
        if( !video_image || video_image->width < rect.width ||
            video_image->height < rect.height )
        {
            cvReleaseImage( &video_image );
            video_image = cvCreateImage( cvSize( rect.width, rect.height ), 8, 3 );
        }

        cvSetImageROI( video_image, cvRect(0,0,rect.width, rect.height));
        // origin == 1 means a bottom-left origin image; flip to top-left.
        if( frame->origin == 1 )
            cvFlip( frame, video_image, 0 );
        else
            cvCopy( frame, video_image, 0 );

        DetectAndDrawFaces( video_image );
        if( writer && is_recorded )
        {
            cvWriteToAVI( writer, video_image );
        }
        // Swap R/B channels — presumably to match fl_draw_image's expected
        // byte order; TODO confirm against the FLTK draw call below.
        cvCvtColor( video_image, video_image, CV_RGB2BGR );

        uchar* data = 0;
        int step = 0;
        CvSize size;
        cvGetRawData( video_image, &data, &step, &size );

        video_window->redraw();
        fl_draw_image( (uchar*)data, video_window->x(), video_window->y(),
                       size.width, size.height, 3, step );
    }

    if( started )
    {
        double cur_frame_stamp = get_time_accurate();
        // update fps
        // First sample seeds the estimate; afterwards an exponential moving
        // average weighted by fps_alpha smooths it. Timestamps appear to be
        // in milliseconds (hence the 1000 factor) — confirm get_time_accurate.
        if( fps < 0 )
            fps = 1000/(cur_frame_stamp - prev_frame_stamp);
        else
            fps = (1-fps_alpha)*fps + fps_alpha*1000/(cur_frame_stamp - prev_frame_stamp);
        prev_frame_stamp = cur_frame_stamp;
        sprintf( fps_buffer, "FPS: %5.1f", fps );
        fps_box->label( fps_buffer );
        fps_box->redraw();
        // Frame-count limit: exit when exhausted, or rewind and restart the
        // countdown when looping is enabled.
        if( total_frames > 0 )
        {
            if( --total_frames == 0 )
                if( !is_loopy )
                    cb_Exit(0,0);
                else
                {
                    total_frames = total_frames0;
                    cvSetCaptureProperty( capture, CV_CAP_PROP_POS_FRAMES, start_pos );
                }
        }
        Fl::add_timeout( timeout, get_next_frame, 0 );
    }
}