Code Example #1
int main( int argc, char** argv )
{
    if (argc == 1) {
        std::cerr << "Usage: ./transform_image -i source.pgm -o warped.pgm [-t warp.xfm] [mode]\n"
                  << "  Rotation angle:           -a 60\n"
                  << "  Scale factor:             -s 1.5\n"
                  << "  Use homography from file: -h warp.xfm\n";
        return 0;
    }
    
    char *image_file = NULL, *transform_file = NULL, *homography_file = NULL;
    std::string out_file = "warped.pgm";
    float angle = 0; //degrees
    float scaling = 0;
    Mode mode = None;
    
    int arg = 0;
    while (++arg < argc) {
        if (! strcmp(argv[arg], "-i"))
            image_file = argv[++arg];
        else if (! strcmp(argv[arg], "-o"))
            out_file = argv[++arg];
        else if (! strcmp(argv[arg], "-t"))
            transform_file = argv[++arg];
        else if (! strcmp(argv[arg], "-a")) {
            angle = atof(argv[++arg]);
            mode = Rotate;
        }
        else if (! strcmp(argv[arg], "-s")) {
            scaling = atof(argv[++arg]);
            mode = Scale;
        }
        else if (! strcmp(argv[arg], "-h")) {
            homography_file = argv[++arg];
            mode = Transform;
        }
    }
    assert(image_file);
    assert(mode != None);

    IplImage* loaded = cvLoadImage(image_file, CV_LOAD_IMAGE_GRAYSCALE);
    assert(loaded);
    int W = loaded->width;
    int H = loaded->height;

    CvMat* transform = NULL;
    IplImage* warped = NULL;
    int flags = CV_INTER_CUBIC | CV_WARP_FILL_OUTLIERS;

    if (mode == Rotate) {
        transform = cvCreateMat(2, 3, CV_32FC1);
        CvSize warped_size = FullImageRotation(W, H, angle, transform);
        warped = cvCreateImage(warped_size, IPL_DEPTH_8U, 1);
        cvWarpAffine(loaded, warped, transform, flags);
    }
    else if (mode == Scale) {
        transform = cvCreateMat(2, 3, CV_32FC1);
        cvZero(transform);
        float* data = transform->data.fl;
        *data = scaling;
        data[transform->step/sizeof(float) + 1] = scaling;
        CvSize warped_size = cvSize(W*scaling + 0.5, H*scaling + 0.5);
        warped = cvCreateImage(warped_size, IPL_DEPTH_8U, 1);
        cvWarpAffine(loaded, warped, transform, flags);
    }
    else if (mode == Transform) {
        transform = cvCreateMat(3, 3, CV_32FC1);
        ReadTransform(homography_file, transform);
        warped = cvCreateImage(cvSize(W, H), IPL_DEPTH_8U, 1);
        cvWarpPerspective(loaded, warped, transform, flags);
    }
    
    cvSaveImage(out_file.c_str(), warped);

    if (transform_file)
        WriteTransform(transform_file, transform);
    
    cvReleaseMat(&transform);
    cvReleaseImage(&warped);
    cvReleaseImage(&loaded);
    return 0;
}
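
The FullImageRotation() helper used above is not part of this listing. A minimal sketch of what it plausibly does, assuming a rotation about the image center whose output canvas is enlarged to hold the whole rotated image:

CvSize FullImageRotation(int W, int H, float angle, CvMat* transform)
{
    // 2x3 affine rotation about the image center (no extra scaling).
    cv2DRotationMatrix(cvPoint2D32f(W * 0.5f, H * 0.5f), angle, 1.0, transform);

    // Bounding box of the rotated WxH rectangle.
    double rad = angle * CV_PI / 180.0;
    double c = fabs(cos(rad)), s = fabs(sin(rad));
    int newW = (int)(W * c + H * s + 0.5);
    int newH = (int)(W * s + H * c + 0.5);

    // Shift the translation column so the result is centered in the new canvas.
    float* data = transform->data.fl;
    data[2] += (newW - W) * 0.5f;
    data[transform->step / sizeof(float) + 2] += (newH - H) * 0.5f;

    return cvSize(newW, newH);
}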
Code Example #2
int main(int argc, char *argv[])
{
	int height, width, step, channels;
	unsigned char *data;
	const char *window = "Object Detection";

	//If we do not have an input image
	if(argc < 2)
	{
		printf("Usage: object-detection <image-file>\n");
		exit(0);
	}

	//Load image from input
	IplImage *img = 0;
	IplImage *src = 0;
	src = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
	//CvCapture *capture = cvCaptureFromCAM(0);

	if(!src)
	{
		printf("Could not load image file: %s\n", argv[1]);
		exit(0);
	}

	
	/*if(!cvGrabFrame(capture))
	{
		printf("Could not capture device\n");
		exit(0);
	}*/

	//img = cvRetrieveFrame(capture,1);
	
	//Get the image data
	height    = src->height;
	width     = src->width;
	step      = src->widthStep;
	channels  = src->nChannels;
	data      = (unsigned char *)src->imageData;
	

	img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	
	cvCvtColor(src, img, CV_BGR2GRAY);

	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	data      = (unsigned char *)img->imageData;
	
	//Information about the image
	printf("Processing a %dx%d image with %d channels\n", height, width, channels); 

	//Set up basic window
	cvNamedWindow(window, CV_WINDOW_AUTOSIZE);
	cvMoveWindow(window, 100, 100);
	
	//unsigned char *test = (unsigned char *)malloc(sizeof(unsigned char) * width * height);
	IplImage *b = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage *g = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage *r = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);

	cvSplit(src, b, g, r, 0);	
	steeringKernel(b);
	steeringKernel(g);
	steeringKernel(r);

	cvMerge(b, g, r, 0, src);

	//steeringKernel(img);

	//memcpy(data, test, sizeof(unsigned char) * width * height);
	//Invert the image
	//bilateralKernel(src);

	//pca(src);
	//Display the image in the window
	cvShowImage(window, src);
    //cvShowImage("b", b);
	//cvShowImage("g", g);
	cvSaveImage("object-detection-output.jpg", img, 0);

	//Wait key to signal exit  
	cvWaitKey(0);

	//Release the images
	cvReleaseImage(&img);
	cvReleaseImage(&src);
	cvReleaseImage(&b);
	cvReleaseImage(&g);
	cvReleaseImage(&r);
	//cvReleaseCapture(&capture);
	return 0;
}
Code Example #3
File: convexhull2.cpp  Project: rovim/LSMRecognition
int main(int argc, char** argv)
{
	CvMemStorage* mstrg = cvCreateMemStorage();
	CvSeq* contours = 0; 
	CvSeq* contours2 = 0; 

	const char* filename = 0;
	IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;		

	int c, nframes = 0;
	int nframesToLearnBG = 300;

	model = cvCreateBGCodeBookModel();

	//Set color thresholds to default values
	model->modMin[0] = 3;
	model->modMin[1] = model->modMin[2] = 3;
	model->modMax[0] = 10;
	model->modMax[1] = model->modMax[2] = 10;
	model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

	bool pause = false;
	bool singlestep = false;

	printf("Capturando de la camara...\n");
	capture = cvCaptureFromCAM( 0 );

	if( !capture )
	{
		printf( "No se pudo inicializar la captura de video\n\n" );
		return -1;
	}

	while (true)
	{

		rawImage = cvQueryFrame( capture );
		++nframes;
		if(!rawImage) 
			break;


		//First time:
		if( nframes == 1 && rawImage )
		{
			borde = cvLoadImage("Borde.png",0);

			// CODEBOOK METHOD ALLOCATION
			yuvImage = cvCloneImage(rawImage);

			int w = yuvImage->width;
			cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
			IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
			cvCopy(yuvImage, tmp, NULL);
			cvResetImageROI(yuvImage);
			cvReleaseImage(&yuvImage); // drop the full-size clone; keep only the 250x250 ROI copy
			yuvImage = tmp;

			ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
			ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );

			cvSet(ImaskCodeBook,cvScalar(255));

			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got an rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			if (borde)
				cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);

			//Draw the contour
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
				char buffer [33];
				sprintf(buffer, "%d", nframesToLearnBG - nframes); // portable replacement for MSVC-only _itoa
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			//YUV for the codebook method

			//Build the background model
			if(nframes-1 < nframesToLearnBG  )
				cvBGCodeBookUpdate( model, yuvImage );


			if( nframes-1 == nframesToLearnBG  )
			{
				cvBGCodeBookClearStale( model, model->t/2 );
				printf (">>Fondo aprendido\n");
			}

			//Find objects with the codebook method
			if( nframes-1 >= nframesToLearnBG  )
			{
				cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );

				cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
				cvSegmentFGMask( ImaskCodeBookCC );

				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				//image detection
				detect(ImaskCodeBookCC,rawImage);

				//base for drawing the hand
				if(contours)
					cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 );


			}
			//Display
			cvResetImageROI(rawImage);
			cvShowImage( "CapturaCam", rawImage );
			cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

		}

		// User input:
		c = cvWaitKey(10)&0xFF;
		c = tolower(c);
		// End processing on ESC, q or Q
		if(c == 27 || c == 'q')
			break;
		//Else check for user input
		switch( c )
		{
		case 'c':
			saveLength = true;
			break;        
		case ' ':
			cvBGCodeBookClearStale( model, 0 );
			nframes = 0;
			break;            
		}

		if (c != 'c')
			saveLength=false;
	}		

	cvReleaseImage( &yuvImage );
	cvReleaseImage( &ImaskCodeBook );
	cvReleaseImage( &ImaskCodeBookCC );
	cvReleaseCapture( &capture );
	cvReleaseMemStorage(&mstrg);
	cvDestroyWindow( "CapturaCam" );
	cvDestroyWindow( "ForegroundCodeBook");
	cvDestroyWindow( "CodeBook_ConnectComp");
	return 0;
}
Code Example #4
File: SSIM.cpp  Project: Anjan369/ssim
/*
 * Parameters : complete path to the two image to be compared
 * The file format must be supported by your OpenCV build
 */
int main(int argc, char** argv)
{
	if(argc!=3)
		return -1;
	
	// default settings: C1 = (K1*L)^2, C2 = (K2*L)^2 with K1 = 0.01, K2 = 0.03, L = 255
	double C1 = 6.5025, C2 = 58.5225;

	IplImage
		*img1=NULL, *img2=NULL, *img1_img2=NULL,
		*img1_temp=NULL, *img2_temp=NULL,
		*img1_sq=NULL, *img2_sq=NULL,
		*mu1=NULL, *mu2=NULL,
		*mu1_sq=NULL, *mu2_sq=NULL, *mu1_mu2=NULL,
		*sigma1_sq=NULL, *sigma2_sq=NULL, *sigma12=NULL,
		*ssim_map=NULL, *temp1=NULL, *temp2=NULL, *temp3=NULL;
	

	/***************************** INITS **********************************/
	img1_temp = cvLoadImage(argv[1]);
	img2_temp = cvLoadImage(argv[2]);

	if(img1_temp==NULL || img2_temp==NULL)
		return -1;

	int x=img1_temp->width, y=img1_temp->height;
	int nChan=img1_temp->nChannels, d=IPL_DEPTH_32F;
	CvSize size = cvSize(x, y);

	img1 = cvCreateImage( size, d, nChan);
	img2 = cvCreateImage( size, d, nChan);

	cvConvert(img1_temp, img1);
	cvConvert(img2_temp, img2);
	cvReleaseImage(&img1_temp);
	cvReleaseImage(&img2_temp);

	
	img1_sq = cvCreateImage( size, d, nChan);
	img2_sq = cvCreateImage( size, d, nChan);
	img1_img2 = cvCreateImage( size, d, nChan);
	
	cvPow( img1, img1_sq, 2 );
	cvPow( img2, img2_sq, 2 );
	cvMul( img1, img2, img1_img2, 1 );

	mu1 = cvCreateImage( size, d, nChan);
	mu2 = cvCreateImage( size, d, nChan);

	mu1_sq = cvCreateImage( size, d, nChan);
	mu2_sq = cvCreateImage( size, d, nChan);
	mu1_mu2 = cvCreateImage( size, d, nChan);
	

	sigma1_sq = cvCreateImage( size, d, nChan);
	sigma2_sq = cvCreateImage( size, d, nChan);
	sigma12 = cvCreateImage( size, d, nChan);

	temp1 = cvCreateImage( size, d, nChan);
	temp2 = cvCreateImage( size, d, nChan);
	temp3 = cvCreateImage( size, d, nChan);

	ssim_map = cvCreateImage( size, d, nChan);
	/*************************** END INITS **********************************/


	//////////////////////////////////////////////////////////////////////////
	// PRELIMINARY COMPUTING
	cvSmooth( img1, mu1, CV_GAUSSIAN, 11, 11, 1.5 );
	cvSmooth( img2, mu2, CV_GAUSSIAN, 11, 11, 1.5 );
	
	cvPow( mu1, mu1_sq, 2 );
	cvPow( mu2, mu2_sq, 2 );
	cvMul( mu1, mu2, mu1_mu2, 1 );


	cvSmooth( img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq );
	
	cvSmooth( img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq );

	cvSmooth( img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma12, 1, mu1_mu2, -1, 0, sigma12 );
	

	//////////////////////////////////////////////////////////////////////////
	// FORMULA
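	// What follows evaluates, pixelwise on the 11x11 Gaussian-weighted local
	// statistics computed above:
	//   SSIM(x,y) = ((2*mu1*mu2 + C1) * (2*sigma12 + C2))
	//             / ((mu1^2 + mu2^2 + C1) * (sigma1^2 + sigma2^2 + C2))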

	// (2*mu1_mu2 + C1)
	cvScale( mu1_mu2, temp1, 2 );
	cvAddS( temp1, cvScalarAll(C1), temp1 );

	// (2*sigma12 + C2)
	cvScale( sigma12, temp2, 2 );
	cvAddS( temp2, cvScalarAll(C2), temp2 );

	// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
	cvMul( temp1, temp2, temp3, 1 );

	// (mu1_sq + mu2_sq + C1)
	cvAdd( mu1_sq, mu2_sq, temp1 );
	cvAddS( temp1, cvScalarAll(C1), temp1 );

	// (sigma1_sq + sigma2_sq + C2)
	cvAdd( sigma1_sq, sigma2_sq, temp2 );
	cvAddS( temp2, cvScalarAll(C2), temp2 );

	// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
	cvMul( temp1, temp2, temp1, 1 );

	// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
	cvDiv( temp3, temp1, ssim_map, 1 );


	CvScalar index_scalar = cvAvg( ssim_map );
	
	// through observation, there is approximately 
	// 1% error max with the original matlab program

	cout << "(R, G & B SSIM index)" << endl ;
	cout << index_scalar.val[2] * 100 << "%" << endl ;
	cout << index_scalar.val[1] * 100 << "%" << endl ;
	cout << index_scalar.val[0] * 100 << "%" << endl ;

	// release the IplImages before returning when using this inside a larger program
	cvReleaseImage(&img1);      cvReleaseImage(&img2);      cvReleaseImage(&img1_img2);
	cvReleaseImage(&img1_sq);   cvReleaseImage(&img2_sq);
	cvReleaseImage(&mu1);       cvReleaseImage(&mu2);
	cvReleaseImage(&mu1_sq);    cvReleaseImage(&mu2_sq);    cvReleaseImage(&mu1_mu2);
	cvReleaseImage(&sigma1_sq); cvReleaseImage(&sigma2_sq); cvReleaseImage(&sigma12);
	cvReleaseImage(&temp1);     cvReleaseImage(&temp2);     cvReleaseImage(&temp3);
	cvReleaseImage(&ssim_map);
	return 0;
}
Code Example #5
File: zoom.c  Project: truongminh/ccache
void zoomImg(safeQueue *sq, struct bio_job *job)
{
    /* Search tmp folder */

    char *uri = job->name+strlen(SERVICE_ZOOM) + 1;
    sds dstpath = zoomePathInTmpDir(uri);
    //job->result = ufileMakeHttpReplyFromFile(dstpath);
    job->result = ufileMmapHttpReply(dstpath);
    printf("After Read File %.2lf \n", (double)(clock()));
    if(job->result) {
        sdsfree(dstpath);
        safeQueuePush(sq,job); /* the current job will be freed by master */
        return;
    }

    int width = 0, height = 0;
    sds fn = NULL;
    sds srcpath = NULL;
    IplImage* src = NULL;
    IplImage* dst = NULL;
    IplImage* toencode = NULL;
    CvMat* enImg = NULL;
    int notpushed = 1;
    int iscrop = 1;
    int p[3];
    p[0] = CV_IMWRITE_JPEG_QUALITY;
    p[1] = IMG_DEFAULT_QUALITY;
    p[2] = 0;
    uchar *buf = NULL;
    size_t len = 0;
    uri_parse_state state = img_parse_uri(uri,&fn,&width,&height, &iscrop, &p[1]);
    if(state == parse_error) goto clean;
    // initializations
    srcpath = bioPathInSrcDir(fn);    
    printf("Before Load Image %.2lf \n", (double)(clock()));
    src = cvLoadImage(srcpath, CV_LOAD_IMAGE_COLOR);
    printf("After Load Image %.2lf \n", (double)(clock()));
    /* validate that everything initialized properly */
    if(!src)
    {
        ulog(CCACHE_VERBOSE,"can't load image file: %s\n",srcpath);
        goto clean;
    }

    int src_width = src->width;
    int src_height = src->height;
    int roi_src_width = src_width;
    int roi_src_height = src_height;


    if(width&&height) {
        /* Preserve original ratio */
        /* NOTICE: dangerous type conversion */
        roi_src_width = src_height*width/height;
        roi_src_height = src_width*height/width;
        if(roi_src_width>src_width) roi_src_width = src_width;
        if(roi_src_height>src_height) roi_src_height = src_height;
    }
    else if(!width&&height) {
        width = src_width;
    }
    else if(width&&!height) {
        height = src_height;
    }
    else {
        toencode = src;
    }

    if(!toencode) {
        if(iscrop) {
            int x = (src_width - roi_src_width)/2;
            int y = (src_height - roi_src_height)/2;
            // Say what the source region is
            cvSetImageROI( src, cvRect(x,y,roi_src_width,roi_src_height));
        }

        dst = cvCreateImage(cvSize(width,height), src->depth, src->nChannels);
        if(!dst) goto clean;

        cvResize(src,dst,CV_INTER_CUBIC);
        printf("After Resize Image %.2lf \n", (double)(clock()));


        if(iscrop) {
            cvResetImageROI( src );
        }

        toencode = dst;
    }


    enImg = cvEncodeImage(IMG_ENCODE_DEFAULT, toencode, p );

    printf("After Encode Image %.2lf \n", (double)(clock()));

    buf = enImg->data.ptr;
    len = enImg->rows*enImg->cols;
    job->result = ufileMakeHttpReplyFromBuffer(buf,len);
    job->type |= BIO_WRITE_FILE; /* Remind master of new written file  */
    safeQueuePush(sq,job);    
    notpushed = 0;

  /* clean up and release resources */
clean:
    if(notpushed) {
        job->result = NULL;
        safeQueuePush(sq,job);
    }
    if(fn) sdsfree(fn);
    if(srcpath) sdsfree(srcpath);
    if(src) cvReleaseImage(&src);
    if(enImg){
        saveImage(dstpath, buf, len);
        cvReleaseMat(&enImg);
    }
    sdsfree(dstpath);
    if(dst) cvReleaseImage(&dst);
    return;
}
Code Example #6
/*!
    \fn CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
 */
void CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
{
  if( db_type == XM2VTS )
  {
    printf("Training an SVM classifier  ................\n");
    CvXm2vts *xm2vts = (CvXm2vts*)database;
    int nTrainingExample = 200*4;
    CvMat* trainData = cvCreateMat(nTrainingExample, nofeatures, CV_32FC1);
    CvMat* response = cvCreateMat(nTrainingExample, 1, CV_32FC1);
    
    for (int i = 0; i < nofeatures; i++)
    {
      /* load feature value */
      CvGaborFeature *feature;
      feature = new_pool->getfeature(i);
      printf("Getting the %d feature ............\n", i+1);
      
      char *filename = new char[256]; // roomy enough for path + file name
      //training validation
      double l, t;
      int fal = 0;
      for(int sub = 1; sub <= 200; sub++)
      {
        if (((CvXm2vts*)database)->getGender( sub )) t = 1.0;
        else t = 2.0;
        
        for(int pic = 1; pic <= 4; pic++)
        {
          sprintf(filename, "%s/%d_%d.bmp", path, sub, pic);
          IplImage *img = cvLoadImage( filename, CV_LOAD_IMAGE_ANYCOLOR );
          IplImage *grayimg = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
          if ( img->nChannels == 1 )  cvCopy( img, grayimg, NULL );
          else if (img->nChannels == 3)   cvCvtColor( img, grayimg, CV_RGB2GRAY );
          
          double vfeature = feature->val( grayimg ); // evaluate the feature on the grayscale image prepared above
          cvSetReal2D( trainData, ((sub-1)*4+(pic-1)), i, vfeature );
          cvSetReal1D( response, ((sub-1)*4+(pic-1)), t );
          cvReleaseImage(&img);
          cvReleaseImage(&grayimg);
        }
      }
      delete [] filename;
    }
    
    
    printf("building the svm classifier .........................\n");
    CvTermCriteria term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 200, 0.8);
  /*Type of SVM, one of the following types:
    CvSVM::C_SVC - n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers.
    CvSVM::NU_SVC - n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C.
    CvSVM::ONE_CLASS - one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
    CvSVM::EPS_SVR - regression. The distance between feature vectors from the training set and the fitting hyperplane must be less than p. For outliers the penalty multiplier C is used.
    CvSVM::NU_SVR - regression; nu is used instead of p. */
    int _svm_type = CvSVM::NU_SVC;
  /*The kernel type, one of the following types:
    CvSVM::LINEAR - no mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. d(x,y) = x•y == (x,y)
    CvSVM::POLY - polynomial kernel: d(x,y) = (gamma*(x•y)+coef0)degree
    CvSVM::RBF - radial-basis-function kernel; a good choice in most cases: d(x,y) = exp(-gamma*|x-y|2)
    CvSVM::SIGMOID - sigmoid function is used as a kernel: d(x,y) = tanh(gamma*(x•y)+coef0) */
    
    int _kernel_type = CvSVM::POLY;
    
    double _degree = 3.0;
    double _gamma = 1.0;
    double _coef0 = 0.0;
    double _C = 1.0;
    double _nu = 1.0;
    double _p = 1.0;
    
    CvSVMParams  params( _svm_type, _kernel_type, _degree, _gamma, _coef0, _C, _nu, _p,
                         0, term_crit );
    
    svm->train( trainData, response, 0, 0, params );
    
    svm->save( "svm.xml", "svm" );
    cvReleaseMat(&response);
    cvReleaseMat(&trainData);
  }
}
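
A minimal usage sketch for the saved model (hypothetical driver code: new_pool and probeGrayImg stand in for the same Gabor feature pool used in training and a preprocessed grayscale probe image):

CvSVM svm;
svm.load( "svm.xml", "svm" );

// Build one row of feature responses in the same order used during training.
CvMat* sample = cvCreateMat( 1, nofeatures, CV_32FC1 );
for (int i = 0; i < nofeatures; i++)
{
  CvGaborFeature *feature = new_pool->getfeature(i);
  cvSetReal2D( sample, 0, i, feature->val( probeGrayImg ) );
}

float predicted = svm.predict( sample );  // 1.0 or 2.0, per the gender labels above
cvReleaseMat( &sample );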
Code Example #7
File: main.cpp  Project: Barbakas/windage
int main()
{
	windage::Logger logger(&std::cout);

	IplImage* grabImage;
	IplImage* inputImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	IplImage* resizeImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* grayImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);

	FleaCamera* capture = new FleaCamera();
	capture->open();
	capture->start();
//	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
	cvNamedWindow("result");

	// create and initialize tracker
	//IMPORTANT
	windage::Frameworks::PlanarObjectTracking tracking;

	windage::Calibration* calibration;
	windage::Algorithms::FeatureDetector* detector;
	windage::Algorithms::SearchTree* searchtree;
	windage::Algorithms::OpticalFlow* opticalflow;
	windage::Algorithms::HomographyEstimator* estimator;
	windage::Algorithms::OutlierChecker* checker;
	windage::Algorithms::HomographyRefiner* refiner;

	calibration = new windage::Calibration();
	detector = new windage::Algorithms::WSURFdetector();
	searchtree = new windage::Algorithms::KDtree();
	opticalflow = new windage::Algorithms::OpticalFlow();
	estimator = new windage::Algorithms::RANSACestimator();
	checker = new windage::Algorithms::OutlierChecker();
	refiner = new windage::Algorithms::LMmethod();

	calibration->Initialize(INTRINSIC[0], INTRINSIC[1], INTRINSIC[2], INTRINSIC[3], INTRINSIC[4], INTRINSIC[5], INTRINSIC[6], INTRINSIC[7]);
	searchtree->SetRatio(0.7);
	opticalflow->Initialize(WIDTH, HEIGHT, cvSize(15, 15), 3);
	estimator->SetReprojectionError(REPROJECTION_ERROR);
	checker->SetReprojectionError(REPROJECTION_ERROR * 3);
	refiner->SetMaxIteration(10);

	tracking.AttatchCalibration(calibration);
	tracking.AttatchDetetor(detector);
	tracking.AttatchMatcher(searchtree);
	tracking.AttatchTracker(opticalflow);
	tracking.AttatchEstimator(estimator);
	tracking.AttatchChecker(checker);
	tracking.AttatchRefiner(refiner);
//	tracking.AttatchFilter(filter);

	tracking.SetDitectionRatio(1);
	tracking.Initialize(WIDTH, HEIGHT, (double)WIDTH, (double)HEIGHT);

	int keypointCount = 0;
	int matchingCount = 0;
	double threshold = detector->GetThreshold();
	double processingTime = 0.0;

	bool trained = false;

#if USE_TEMPLATE_IMAEG
	IplImage* sampleImage = cvLoadImage(TEMPLATE_IMAGE, 0);
	detector->SetThreshold(threshold);
	tracking.AttatchReferenceImage(sampleImage);
	tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
	detector->SetThreshold(threshold);
	trained = true;
#endif

	char message[100];
	bool flip = false;
	bool processing = true;
	while(processing)
	{
		// capture image
		capture->update();
		grabImage = capture->GetIPLImage();
//		inputImage = cvRetrieveFrame(capture);
		cvResize(grabImage, inputImage);
		cvCvtColor(inputImage, resultImage, CV_BGRA2BGR);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		if(flip)
			cvFlip(inputImage, inputImage);

		logger.updateTickCount();

		// track object
		if(trained)
		{
			//IMPORTANT
			tracking.UpdateCamerapose(grayImage);

			// adaptive threshold
#if USE_ADAPTIVE_THRESHOLD
			int localcount = detector->GetKeypointsCount();
			if(keypointCount != localcount)
			{
				if(localcount > FEATURE_COUNT)
					threshold += 1;
				if(localcount < FEATURE_COUNT)
					threshold -= 1;
				detector->SetThreshold(threshold);
				keypointCount = localcount;
			}
#endif
			// draw result
//			detector->DrawKeypoints(resultImage);

			tracking.DrawOutLine(resultImage, true);
			tracking.DrawDebugInfo(resultImage);

			windage::Calibration* result = tracking.GetCameraParameter();

			calibration->DrawInfomation(resultImage, 100);
		}
		matchingCount = tracking.GetMatchingCount();

		processingTime = logger.calculateProcessTime();
		logger.log("processingTime", processingTime);
		logger.logNewLine();

		sprintf_s(message, "Processing Time : %.2lf ms", processingTime);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 20), 0.6, message);
		sprintf_s(message, "Feature Count : %d, Threshold : %.0lf", keypointCount, threshold);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 40), 0.6, message);
		sprintf_s(message, "Matching Count : %d", matchingCount);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 60), 0.6, message);

		sprintf_s(message, "Press 'Space' to track the current image");
		windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-10), 0.5, message);
		sprintf_s(message, "Press 'F' to flip image");
		windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-25), 0.5, message);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case 'q':
		case 'Q':
			processing = false;
			break;
		case 'f':
		case 'F':
			flip = !flip;
			break;
		case ' ':
		case 's':
		case 'S':
			detector->SetThreshold(30.0);
			tracking.AttatchReferenceImage(grayImage);
			tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
			detector->SetThreshold(threshold);
			trained = true;
			break;
		}		
	}

	capture->stop();
	capture->close();
	delete capture;
//	cvReleaseCapture(&capture);
	cvDestroyAllWindows();
	return 0;
}
Code Example #8
File: Colorize.cpp  Project: erebuswolf/IGVC-Code
int main( int argc, char** argv )
{
	init();
	
	/* data structure for the image */
	IplImage *img = 0;

	char filename[]="test1.jpg";

	/* load the image in color;
	use CV_LOAD_IMAGE_GRAYSCALE instead to load it in grayscale */
	img = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR );
	
	/* always check */
	if( img == 0 ) {
		fprintf( stderr, "Cannot load file %s!\n", filename);
		return 1;
	}

	/* create a window */ 
	cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "image", img );
	IplImage *colored_image=cvCreateImage(cvSize(img->width,img->height),8,3);
	BayerToColor(img,colored_image,GBRG);
	
	/* create a window */ 
	cvNamedWindow( "Colored image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "Colored image", colored_image );
	IplImage *bw_image=cvCreateImage(cvSize(img->width,img->height),8,1);
	
	BayerToGray(img,bw_image,GBRG);
	
	/* create a window */ 
	cvNamedWindow( "BW image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "BW image", bw_image );
	/* wait until user press a key */
	cvWaitKey(0);

	/* free memory */
	cvDestroyWindow( "image" );
	cvDestroyWindow( "Colored image" );
	cvDestroyWindow( "BW image");

	cvReleaseImage( &img );
	cvReleaseImage( &colored_image );
	cvReleaseImage( &bw_image );
	return 0;
}
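
The BayerToColor()/BayerToGray() helpers are project code and are not shown. A rough equivalent using OpenCV's built-in demosaicing, assuming the GBRG pattern corresponds to CV_BayerGB2BGR and that the input is the raw 8-bit single-channel mosaic (note the sample above loads the file in color, so the project helpers presumably extract one channel first):

void BayerToColorSketch(IplImage* bayer, IplImage* bgr)
{
	/* bayer: 8-bit 1-channel mosaic, bgr: 8-bit 3-channel output */
	cvCvtColor(bayer, bgr, CV_BayerGB2BGR);
}

void BayerToGraySketch(IplImage* bayer, IplImage* gray)
{
	/* demosaic first, then collapse to grayscale */
	IplImage* tmp = cvCreateImage(cvGetSize(bayer), IPL_DEPTH_8U, 3);
	cvCvtColor(bayer, tmp, CV_BayerGB2BGR);
	cvCvtColor(tmp, gray, CV_BGR2GRAY);
	cvReleaseImage(&tmp);
}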
Code Example #9
bool _stdcall opencvProcess(LPWSTR csInputPath, LPWSTR csOutputPath)
{
	char inputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csInputPath, -1, inputPath, SIZE, NULL, NULL);//wchar_t * to char
	char outputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csOutputPath, -1, outputPath, SIZE, NULL, NULL);//wchar_t * to char *

	//load image
	img = cvLoadImage(inputPath, 1);
	if(!img)
		return false;
	else 
	{
		CvSize size = cvGetSize(img); 

		int xScreen = GetSystemMetrics(SM_CXSCREEN);
		int yScreen = GetSystemMetrics(SM_CYSCREEN);
		
		while(size.width + 100 > xScreen || size.height + 100 > yScreen)
		{
			size.width /= 1.4;
			size.height /= 1.4;
		}//end while
	
		size.height += 90;

		cvNamedWindow(windowName, 0);
		cvResizeWindow(windowName, size.width, size.height); 
		cvMoveWindow(windowName, (xScreen-size.width)/2, (yScreen-size.height)/2 ); 
		
		dst = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
	
		LookupTableMatrix = cvCreateMatHeader(1,256,CV_8UC1);
		cvSetData(LookupTableMatrix, LookupTableData, 0);


		//read Brightness Contrast from file
		char bcPath[SIZE] = "";
		sprintf(bcPath, "%s\\InstaFilter\\Brightness and Contrast.if", getenv("temp"));
		FILE *bc = fopen(bcPath, "rb");
		if(!bc) return false;
		//read
		char data[SIZE];
		fgets(data, SIZE, bc);
		char *token = strtok(data, " ");
		BrightnessPosition += atoi(token);
		token = strtok(NULL, " ");
		ContrastPosition += atoi(token);
		fclose(bc);

		cvCreateTrackbar("亮度", windowName, &BrightnessPosition, 200, onTrackbar);
		cvCreateTrackbar("對比", windowName, &ContrastPosition, 200, onTrackbar);

		onTrackbar(0);

		cvWaitKey(0);
			
		//release
		cvSaveImage(outputPath, dst);
		cvReleaseImage(&img);
		cvReleaseImage(&dst);
		cvDestroyAllWindows();

		return true;
	}//end else
	return false;
}//end opencvProcess
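
The onTrackbar callback and the img/dst/LookupTableData/windowName globals are declared elsewhere in this file. A minimal sketch of the callback, assuming the usual linear brightness/contrast lookup table with both sliders centered at 100 (the same mapping as OpenCV's classic demhist.c sample):

void onTrackbar(int pos)
{
	int brightness = BrightnessPosition - 100;
	int contrast   = ContrastPosition - 100;
	double a, b;

	if (contrast > 0)
	{
		double delta = 127.0 * contrast / 100.0;
		a = 255.0 / (255.0 - delta * 2);
		b = a * (brightness - delta);
	}
	else
	{
		double delta = -128.0 * contrast / 100.0;
		a = (256.0 - delta * 2) / 255.0;
		b = a * brightness + delta;
	}

	for (int i = 0; i < 256; i++)
	{
		int v = cvRound(a * i + b);
		LookupTableData[i] = (uchar)(v < 0 ? 0 : v > 255 ? 255 : v);
	}

	cvLUT(img, dst, LookupTableMatrix); // apply the table
	cvShowImage(windowName, dst);
}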
Code Example #10
File: get_field.cpp  Project: p-kar/vision
int main()
{
    IplImage* img = cvLoadImage("goal_arena.bmp");
    CvSize imgSize = cvGetSize(img);
    IplImage* detected = cvCreateImage(imgSize, 8, 1);
 
    IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
    IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
    IplImage* imgRed = cvCreateImage(imgSize, 8, 1);

    cvSplit(img, imgBlue, imgGreen, imgRed, NULL);
    cvAnd(imgGreen, imgBlue, detected);
    cvAnd(detected, imgRed, detected);
    cvErode(detected, detected);
    cvDilate(detected, detected);    // Opening
 
    // cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);
    CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
    cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);

    CvPoint left1 = cvPoint(0, 0);
    CvPoint left2 = cvPoint(0, 0);
    CvPoint right1 = cvPoint(0, 0);
    CvPoint right2 = cvPoint(0, 0);
    CvPoint top1 = cvPoint(0, 0);
    CvPoint top2 = cvPoint(0, 0);
    CvPoint bottom1 = cvPoint(0, 0);
    CvPoint bottom2 = cvPoint(0, 0);
 
    int numLines = lines->rows;
    int numTop = 0;
    int numBottom = 0;
    int numLeft = 0;
    int numRight = 0;

    for(int i=0;i<numLines;i++)
    {
    	CvScalar dat = cvGet1D(lines, i);
        double rho = dat.val[0];
        double theta = dat.val[1];
        if(theta==0.0)
            continue;
        double degrees = theta*180.0/CV_PI;
 
        // The Hough line is x*cos(theta) + y*sin(theta) = rho;
        // sample it at x = 0 and x = img->width.
        CvPoint pt1 = cvPoint(0, rho/sin(theta));
        CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
         if(abs(rho)<50.0)
        {
        	if(degrees>45.0 && degrees<135.0)
            {
            	numTop++;
 
                // The line is horizontal and near the top
                top1.x+=pt1.x;
                top1.y+=pt1.y;
 
                top2.x+=pt2.x;
                top2.y+=pt2.y;
            }

            else
            {
                numLeft++;
 
                // The line is vertical and near the left
                left1.x+=pt1.x;
                left1.y+=pt1.y;
 
                left2.x+=pt2.x;
                left2.y+=pt2.y;
            }
        }

        else
        {
            // We're in the right portion
            if(degrees>45.0 && degrees<135.0)
            {
                numBottom++;
 
                //The line is horizontal and near the bottom
                bottom1.x+=pt1.x;
                bottom1.y+=pt1.y;
 
                bottom2.x+=pt2.x;
                bottom2.y+=pt2.y;
            }
            else
            {
                numRight++;
 
                // The line is vertical and near the right
                right1.x+=pt1.x;
                right1.y+=pt1.y;
 
                right2.x+=pt2.x;
                right2.y+=pt2.y;
            }
        }
    }

    // Guard against division by zero if any edge found no lines
    if (!numLeft || !numRight || !numTop || !numBottom)
    {
        printf("Could not find all four field edges\n");
        return -1;
    }

    left1.x/=numLeft;
    left1.y/=numLeft;
    left2.x/=numLeft;
    left2.y/=numLeft;
 
    right1.x/=numRight;
    right1.y/=numRight;
    right2.x/=numRight;
    right2.y/=numRight;
 
    top1.x/=numTop;
    top1.y/=numTop;
    top2.x/=numTop;
    top2.y/=numTop;
 
    bottom1.x/=numBottom;
    bottom1.y/=numBottom;
    bottom2.x/=numBottom;
    bottom2.y/=numBottom;

    cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
    cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
    cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
    cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

    // Next, we need to figure out the four intersection points
    double leftA = left2.y-left1.y;
    double leftB = left1.x-left2.x;
    double leftC = leftA*left1.x + leftB*left1.y;
 
    double rightA = right2.y-right1.y;
    double rightB = right1.x-right2.x;
    double rightC = rightA*right1.x + rightB*right1.y;
 
    double topA = top2.y-top1.y;
    double topB = top1.x-top2.x;
    double topC = topA*top1.x + topB*top1.y;
 
    double bottomA = bottom2.y-bottom1.y;
    double bottomB = bottom1.x-bottom2.x;
    double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;
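
    // Each border line is in the form A*x + B*y = C, built from its two
    // averaged points. Two such lines meet (Cramer's rule) at
    //   det = A1*B2 - A2*B1,  x = (B2*C1 - B1*C2)/det,  y = (A1*C2 - A2*C1)/det;
    // det == 0 would mean the borders are parallel.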
 
    // Intersection of left and top
    double detTopLeft = leftA*topB - leftB*topA;
    CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);
 
    // Intersection of top and right
    double detTopRight = rightA*topB - rightB*topA;
    CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);
 
    // Intersection of right and bottom
    double detBottomRight = rightA*bottomB - rightB*bottomA;
    CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);
 
    // Intersection of bottom and left
    double detBottomLeft = leftA*bottomB-leftB*bottomA;
    CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

    cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
    cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

    IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
    cvZero(imgMask);
    CvPoint pts[4]; // stack array; no need to new/delete
    pts[0] = ptTopLeft;
    pts[1] = ptTopRight;
    pts[2] = ptBottomRight;
    pts[3] = ptBottomLeft;
    cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));
    cvAnd(img, imgMask, img);
 
    cvNamedWindow("Original");
    cvNamedWindow("Detected");
 
    cvShowImage("Original", img);
    cvShowImage("Detected", detected);
    cvWaitKey(0);

    cvReleaseMat(&lines);
    cvReleaseImage(&imgMask);
    cvReleaseImage(&imgBlue);
    cvReleaseImage(&imgGreen);
    cvReleaseImage(&imgRed);
    cvReleaseImage(&detected);
    cvReleaseImage(&img);
    return 0;
}
Code Example #11
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);
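
	// With CV_WARP_INVERSE_MAP below, H maps bird's-eye canvas pixels back to
	// source-image pixels; its (2,2) entry acts as an overall scale on that
	// mapping, so varying Z zooms the bird's-eye view ('u'/'d' keys below).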

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
Code Example #12
File: intersect.c  Project: freeman94/vision
int main(int argc, char* argv[]) {
	char fname[100]="output";


	if (argc < 2) return usage(argv[0]); // need an input file argument

	int processArgumentOne, processArgumentTwo; speed_up = 33;

	printf("Choose processing mode:\n");
	printf("1: Hough Transform\n");
	printf("2: Least Squares Means Linear Regression\n");
	printf("3: Singular Value Decomposition\n");
	printf("4: Factored Singular Value Decomposition\n");
	scanf("%d", &processArgumentOne);

	sprintf(fname,"output%d.csv", processArgumentOne);
	oFile=fopen(fname,"w");

	//if(*processArgument != 1 || *processArgument != 2 || *processArgument != 3) { printf("Enter a valid option\n"); return -1; }

	printf("Choose input type:\n");
	printf("1: Process input as movie file\n");
	printf("2: Process input as image in sequential series\n");
	scanf("%d", &processArgumentTwo);

	if(processArgumentTwo == 1) { gCapture = cvCreateFileCapture(argv[1]); frame = cvQueryFrame(gCapture); }
	else if (processArgumentTwo == 2) frame = cvLoadImage(argv[1]);


	initializeImages();
	cvSplit(frame, blue, green, red, NULL);
	subFrameX = .1 * width;
	subFrameY = .1 * height;
	cvNamedWindow("Initial", CV_WINDOW_AUTOSIZE);
	cvShowImage("Initial", frame);
	cvSetMouseCallback("Initial", myMouseCallback, NULL);
	cvWaitKey(0);
	cvDestroyWindow("Initial");
	cvNamedWindow("Frame Analysis", CV_WINDOW_NORMAL);
	cvSetMouseCallback("Frame Analysis", onTileSelect, NULL);
	stream = fopen("output","w");

	printf("Starting...\n");
	gettimeofday(&tStart, 0);

	int processTypeInput = processArgumentOne;
	int inputFileType = processArgumentTwo;

	switch(processTypeInput) {
		case 1:
			runHoughTransform(inputFileType);
			break;
		case 2:
			runLSMRegression(inputFileType);
			break;
		case 3:
			runSVD(inputFileType);
			break;
		case 4:
			runFactoredSVD(inputFileType);
			break;
		default:
			printf("Quit without processing\n");
			break;
	}

	gettimeofday(&tComplete, 0);
	totalRunTime = (tComplete.tv_sec - tStart.tv_sec);
	//printf("Total Processing Time: %f\n", totalProcessingTime);
	printf("Total Runtime: %f seconds\n", totalRunTime);

	cvReleaseCapture(&gCapture);
	cvDestroyWindow(argv[0]);
	return(0);
}
Code Example #13
int main(int argc, char **argv)
{
	int ret = 0;
	catcierge_matcher_t *matcher = NULL;
	char *img_paths[4096];
	IplImage *imgs[4096];
	size_t img_count = 0;
	IplImage *img = NULL;
	CvSize img_size;
	CvScalar match_color;
	int match_success = 0;
	double match_res = 0;
	int debug = 0;
	int i;
	int j;
	int show = 0;
	int save = 0;
	char *output_path = "output";
	double match_threshold = 0.8;
	int success_count = 0;
	int preload = 0;
	int test_matchable = 0;
	const char *matcher_str = NULL;
	match_result_t result;

	clock_t start;
	clock_t end;
	catcierge_template_matcher_args_t args;
	catcierge_haar_matcher_args_t hargs;
	char *key = NULL;
	char *values[4096];
	size_t value_count = 0;
	memset(&args, 0, sizeof(args));
	memset(&result, 0, sizeof(result));

	fprintf(stderr, "Catcierge Image match Tester (C) Joakim Soderberg 2013-2014\n");

	if (argc < 4)
	{
		fprintf(stderr, "Usage: %s\n"
						"          [--output [path]]\n"
						"          [--debug]\n"
						"          [--show]\n"
						"          [--match_flipped <0|1>]\n"
						"          [--threshold]\n"
						"          [--preload]\n"
						"          [--test_matchable]\n"
						"          [--snout <snout images for template matching>]\n"
						"          [--cascade <haar cascade xml>]\n"
						"           --images <input images>\n"
						"           --matcher <template|haar>\n", argv[0]);
		return -1;
	}

	catcierge_haar_matcher_args_init(&hargs);
	catcierge_template_matcher_args_init(&args);

	for (i = 1; i < argc; i++)
	{
		if (!strcmp(argv[i], "--show"))
		{
			show = 1;
			continue;
		}
		else if (!strcmp(argv[i], "--output"))
		{
			save = 1;

			if ((i + 1) < argc)
			{
				if (strncmp(argv[i+1], "--", 2))
				{
					i++;
					output_path = argv[i];
				}
			}
			continue;
		}
		else if (!strcmp(argv[i], "--test_matchable")
				|| !strcmp(argv[i], "--test_obstructed"))
		{
			test_matchable = 1;
			preload = 1;
		}
		else if (!strcmp(argv[i], "--debug"))
		{
			debug = 1;
		}
		else if (!strcmp(argv[i], "--images"))
		{
			while (((i + 1) < argc) 
				&& strncmp(argv[i+1], "--", 2))
			{
				i++;
				img_paths[img_count] = argv[i];
				img_count++;
			}
		}
		else if (!strcmp(argv[i], "--preload"))
		{
			if ((i + 1) < argc)
			{
				i++;
				preload = 1;
				continue;
			}
		}
		else if (!strcmp(argv[i], "--matcher"))
		{
			if ((i + 1) < argc)
			{
				if (strncmp(argv[i+1], "--", 2))
				{
					i++;
					matcher_str = argv[i];

					if (strcmp(matcher_str, "template") && strcmp(matcher_str, "haar"))
					{
						fprintf(stderr, "Invalid matcher type \"%s\"\n", matcher_str);
						return -1;
					}
				}
			}
			continue;
		}
		else if (!strncmp(argv[i], "--", 2))
		{
			int j = i + 1;
			key = &argv[i][2];
			memset(values, 0, value_count * sizeof(char *));
			value_count = 0;

			// Look for values for the option.
			// Continue fetching values until we get another option
			// or there are no more options.
			while ((j < argc) && strncmp(argv[j], "--", 2))
			{
				values[value_count] = argv[j];
				value_count++;
				i++;
				j = i + 1;
			}

			if ((ret = parse_arg(&args, &hargs, key, values, value_count)) < 0)
			{
				fprintf(stderr, "Failed to parse command line arguments for \"%s\"\n", key);
				return ret;
			}
		}
		else
		{
			fprintf(stderr, "Unknown command line argument \"%s\"\n", argv[i]);
			return -1;
		}
	}

	if (!matcher_str)
	{
		fprintf(stderr, "You must specify a matcher type\n");
		return -1;
	}

	if (!strcmp(matcher_str, "template") && (args.snout_count == 0))
	{
		fprintf(stderr, "No snout image specified\n");
		return -1;
	}

	if (!strcmp(matcher_str, "haar") && !hargs.cascade)
	{
		fprintf(stderr, "No haar cascade specified\n");
		return -1;
	}

	if (img_count == 0)
	{
		fprintf(stderr, "No input image specified\n");
		return -1;
	}

	// Create output directory.
	if (save)
	{
		catcierge_make_path("%s", output_path);
	}

	args.super.type = MATCHER_TEMPLATE;
	hargs.super.type = MATCHER_HAAR;

	if (catcierge_matcher_init(&matcher,
		(!strcmp(matcher_str, "template")
		? (catcierge_matcher_args_t *)&args
		: (catcierge_matcher_args_t *)&hargs)))
	{
		fprintf(stderr, "Failed to init %s matcher.\n", matcher_str);
			return -1;
	}

	matcher->debug = debug;
	if (!matcher->is_obstructed)
		matcher->is_obstructed = catcierge_is_frame_obstructed;
	//catcierge_set_binary_thresholds(&ctx, 90, 200);

	// If we should preload the images or not
	// (Don't let file IO screw with benchmark)
	if (preload)
	{
		for (i = 0; i < (int)img_count; i++)
		{
			printf("Preload image %s\n", img_paths[i]);

			if (!(imgs[i] = cvLoadImage(img_paths[i], 1)))
			{
				fprintf(stderr, "Failed to load match image: %s\n", img_paths[i]);
				ret = -1;
				goto fail;
			}
		}
	}

	start = clock();

	if (test_matchable)
	{
		for (i = 0; i < (int)img_count; i++)
		{
			// This tests if an image frame is clear or not (matchable).
			int frame_obstructed;

			if ((frame_obstructed = matcher->is_obstructed(matcher, imgs[i])) < 0)
			{
				fprintf(stderr, "Failed to detect check for matchability frame\n");
				return -1;
			}

			printf("%s: Frame obstructed = %d\n",
				img_paths[i], frame_obstructed);

			if (show)
			{
				cvShowImage("image", imgs[i]);
				cvWaitKey(0);
			}
		}
	}
	else
	{
		for (i = 0; i < (int)img_count; i++)
		{
			match_success = 0;

			printf("---------------------------------------------------\n");
			printf("%s:\n", img_paths[i]);

			if (preload)
			{
				img = imgs[i];
			}
			else
			{
				if (!(img = cvLoadImage(img_paths[i], 1)))
				{
					fprintf(stderr, "Failed to load match image: %s\n", img_paths[i]);
					ret = -1;
					goto fail;
				}
			}

			img_size = cvGetSize(img);

			printf("  Image size: %dx%d\n", img_size.width, img_size.height);


			if ((match_res = matcher->match(matcher, img, &result, 0)) < 0)
			{
				fprintf(stderr, "Something went wrong when matching image: %s\n", img_paths[i]);
				catcierge_matcher_destroy(&matcher);
				return -1;
			}

			match_success = (match_res >= match_threshold);

			if (match_success)
			{
				printf("  Match (%s)! %f\n", catcierge_get_direction_str(result.direction), match_res);
				match_color = CV_RGB(0, 255, 0);
				success_count++;
			}
			else
			{
				printf("  No match! %f\n", match_res);
				match_color = CV_RGB(255, 0, 0);
			}

			if (show || save)
			{
				for (j = 0; j < (int)result.rect_count; j++)
				{
					printf("x: %d\n", result.match_rects[j].x);
					printf("y: %d\n", result.match_rects[j].y);
					printf("w: %d\n", result.match_rects[j].width);
					printf("h: %d\n", result.match_rects[j].height);
					cvRectangleR(img, result.match_rects[j], match_color, 1, 8, 0);
				}

				if (show)
				{
					cvShowImage("image", img);
					cvWaitKey(0);
				}

				if (save)
				{
					char out_file[PATH_MAX]; 
					char tmp[PATH_MAX];
					char *filename = tmp;
					char *ext;
					char *start;

					// Get the extension.
					strncpy(tmp, img_paths[i], sizeof(tmp));
					ext = strrchr(tmp, '.');
					*ext = '\0';
					ext++;

					// And filename.
					filename = strrchr(tmp, '/');
					start = strrchr(tmp, '\\');
					if (start > filename)
						filename = start;
					filename++;

					snprintf(out_file, sizeof(out_file) - 1, "%s/match_%s__%s.%s", 
							output_path, match_success ? "ok" : "fail", filename, ext);

					printf("Saving image \"%s\"\n", out_file);

					cvSaveImage(out_file, img, 0);
				}
			}

			cvReleaseImage(&img);
		}
	}

	end = clock();

	if (!test_matchable)
	{
		printf("Note that this time isn't useful with --show\n");
		printf("%d of %d successful! (%f seconds)\n",
			success_count, (int)img_count, (float)(end - start) / CLOCKS_PER_SEC);
	}

fail:
	catcierge_matcher_destroy(&matcher);
	cvDestroyAllWindows();

	return ret;
}
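
A typical invocation might look like this (hypothetical file names; the binary name depends on the build target):

    ./catcierge_tester --matcher haar --cascade catcierge.xml --threshold 0.8 \
        --output results --images frame001.png frame002.png frame003.png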
Code Example #14
File: IplImageWrapper.hpp  Project: Xuelu91/hog3d
inline
IplImageWrapper::IplImageWrapper(std::string fileName)
	: _img(cvLoadImage(fileName.c_str())), _nRefs(new std::size_t(1)), _mask()
{
//std::cout << "IplImageWrapper::IplImageWrapper() -- address: " << _img << " nRefs: " << (*_nRefs) << std::endl;
}
Code Example #15
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track face in each frame but recognize by pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState);// track and recognizer face in each frame 

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceNameBuff;
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display a scaled-down image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
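
A note on the timing idiom used throughout the loop above: speed is measured with the cvGetTickCount()/cvGetTickFrequency() pair. Below is a minimal, self-contained sketch of that pattern (the stage body and names are illustrative; note that the C-API cvGetTickFrequency() returns ticks per microsecond, unlike the C++ cv::getTickFrequency()):

#include <opencv/cxcore.h>
#include <cstdio>

int main()
{
    int64 ticks = cvGetTickCount();

    // ... stage under measurement runs here ...

    ticks = cvGetTickCount() - ticks;
    double elapsed_ms = 1e-3 * ticks / cvGetTickFrequency(); // ticks -> microseconds -> ms
    double fps = (elapsed_ms > 0) ? 1000.0 / elapsed_ms : 0; // per-stage FPS, guarded against 0 ms
    printf("stage: %.2f ms (%.1f fps)\n", elapsed_ms, fps);
    return 0;
}
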
Code Example #16
File: image.cpp  Project: UIKit0/digikam
Image::Image(const QString& filePath)
     : d(new ImagePriv)
{
    d->image = cvLoadImage(QFile::encodeName(filePath), CV_LOAD_IMAGE_GRAYSCALE);
}
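
cvLoadImage returns NULL when the file is missing or its format is unsupported, so a constructor like the one above leaves d->image unchecked. A hedged sketch of a guard (the helper below is illustrative, not part of digikam):

#include <opencv/highgui.h>
#include <QFile>
#include <QtDebug>

static IplImage* loadGrayOrWarn(const QString& filePath)
{
    IplImage* img = cvLoadImage(QFile::encodeName(filePath), CV_LOAD_IMAGE_GRAYSCALE);
    if (!img)
        qWarning() << "cvLoadImage failed for" << filePath; // NULL on missing/unsupported files
    return img;
}
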
Code Example #17
File: algo.cpp  Project: lblsa/roombara
// entry point: interactive tuning of Canny/threshold/Hough-circle parameters on a single image
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf("Usage: %s <img.jpg>\n", argv[0]);
        return 1;
    }
    IplImage* picture = cvLoadImage(argv[1]);
    IplImage* greyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* cannyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* drawnImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 3);
    IplImage* contrastImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    
    cvNamedWindow("Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Canny", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Threshold", CV_WINDOW_NORMAL);
    
    cvCvtColor(picture, greyImg, CV_BGR2GRAY);
    cvEqualizeHist(greyImg, greyImg);
    
    CvMemStorage* storage = cvCreateMemStorage(0); 
    
    while (1) {
        
        // Create trackbars
        cvCopy(picture, drawnImg); // picture to be displayed
        
        cvCreateTrackbar( "min_dist", "Image", &min_dist_switch_value, 49, switch_min_dist );
        cvCreateTrackbar( "dp", "Image", &dp_switch_value, 9, switch_dp );
        cvCreateTrackbar( "High", "Canny", &high_switch_value, 499, switch_callback_h );
        cvCreateTrackbar( "Low", "Canny", &low_switch_value, 499, switch_callback_l );
        cvCreateTrackbar( "Threshold", "Threshold", &threshold_switch_value, 199, switch_threshold );
        cvCreateTrackbar( "Max", "Threshold", &threshold_max_switch_value, 500, switch_threshold_max );
        
        int N = 7;
        
        double dp = dpInt+1;
        double min_dist = min_distInt+1;
        double lowThresh = lowInt + 1;
        double highTresh = highInt + 1;
        double threshold = thresholdInt+1;
        double threshold_max = threshold_maxInt+1;
        
        
        cvThreshold(greyImg, contrastImg, threshold, threshold_max, CV_THRESH_TOZERO_INV);
        cvCanny(contrastImg, cannyImg, lowThresh*N*N, highTresh*N*N, N);
        
        //        CvSeq* circles =cvHoughCircles(greyImg, storage, CV_HOUGH_GRADIENT, 35, 25);
        CvSeq* circles = cvHoughCircles(cannyImg, storage, CV_HOUGH_GRADIENT, dp, min_dist);
        // dp is the inverse ratio of accumulator resolution to image resolution
        // min_dist is the minimum distance between the centers of detected circles
        
        for (int i = 0; i < (circles ? circles->total : 0); i++) 
        { 
            float* p = (float*)cvGetSeqElem( circles, i ); 
            cvCircle( drawnImg, cvPoint(cvRound(p[0]),cvRound(p[1])),3, CV_RGB(0,255,0), -1, 8, 0 ); 
        } 
        
        cvShowImage("Image", drawnImg);
        cvShowImage("Canny", cannyImg);
        cvShowImage("Threshold", contrastImg);
        
        char b = 0;
        
        while (b != 'b') {   // wait for the 'b' key (ASCII 98) before re-running with the current trackbar values
            b = cvWaitKey(1);
        }
    }
}  
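
cvHoughCircles can also take its Canny and accumulator thresholds directly, instead of being fed a precomputed edge image as above. A sketch with the full C-API parameter list (threshold values here are illustrative starting points, not tuned for any particular input):

#include <opencv/cv.h>
#include <opencv/highgui.h>

// Detect circles directly on a grayscale image, letting cvHoughCircles
// run Canny internally instead of feeding it a precomputed edge map.
void detectCircles(IplImage* greyImg, IplImage* drawnImg, CvMemStorage* storage)
{
    // dp:       inverse ratio of accumulator resolution to image resolution
    // min_dist: minimum distance between detected circle centers
    // param1:   upper Canny threshold used internally (lower one is half of it)
    // param2:   accumulator threshold; smaller finds more (and falser) circles
    CvSeq* circles = cvHoughCircles(greyImg, storage, CV_HOUGH_GRADIENT,
                                    2, greyImg->height / 8, 200, 100, 0, 0);

    for (int i = 0; i < (circles ? circles->total : 0); i++) {
        float* p = (float*)cvGetSeqElem(circles, i);  // p[0],p[1] = center, p[2] = radius
        cvCircle(drawnImg, cvPoint(cvRound(p[0]), cvRound(p[1])), cvRound(p[2]),
                 CV_RGB(255, 0, 0), 2, 8, 0);
    }
}
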
Code Example #18
File: Transformation.cpp  Project: Xala/MAV
void Transformation::saveProblem(string path){
	printf("saveProblem(%s)\n",path.c_str());
	IplImage* rgb_img_src 	= cvLoadImage(src->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_src = (char *)rgb_img_src->imageData;
	unsigned short * depth_data_src		= (unsigned short *)(src->input->depth_img->imageData);

	IplImage* rgb_img_dst 	= cvLoadImage(dst->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_dst = (char *)rgb_img_dst->imageData;
	unsigned short * depth_data_dst		= (unsigned short *)(dst->input->depth_img->imageData);

	int	width = rgb_img_src->width;
	int	height = rgb_img_src->height;

	IplImage* img_combine = cvCreateImage(cvSize(2*width,height), IPL_DEPTH_8U, 3);
	char * data = (char *)img_combine->imageData;

	for (int j = 0; j < height; j++){
		for (int i = 0; i < width; i++){
			int ind = 3*(width*j+i);            // was hard-coded to 640; use the actual image width
			int dst_ind = 3 * (j * (2*width) + (width+i));
			int src_ind = 3 * (j * (2*width) + (i));
			int d_dst = depth_data_dst[(width*j+i)];
			int d_src = depth_data_src[(width*j+i)];
			if(d_dst == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[dst_ind + 0] = 255;
				data[dst_ind + 1] = 0;
				data[dst_ind + 2] = 255;
			}else{
				data[dst_ind + 0] = data_dst[ind +0];
				data[dst_ind + 1] = data_dst[ind +1];
				data[dst_ind + 2] = data_dst[ind +2];
			}
			if(d_src == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[src_ind + 0] = 255;
				data[src_ind + 1] = 0;
				data[src_ind + 2] = 255;
			}else{
				data[src_ind + 0] = data_src[ind +0];
				data[src_ind + 1] = data_src[ind +1];
				data[src_ind + 2] = data_src[ind +2];
			}
		}
	}
	cvReleaseImage( &rgb_img_src );
	cvReleaseImage( &rgb_img_dst );

	for(unsigned int i = 0; i < src->keypoints->valid_key_points.size(); i++){
		KeyPoint * src_kp = src->keypoints->valid_key_points.at(i);
		cvCircle(img_combine, cvPoint(src_kp->point->w, src_kp->point->h), 3, cvScalar(0, 255, 0, 0), 1, 8, 0);
	}

	for(unsigned int i = 0; i < src->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = src->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine, cvPoint(kp->point->w, kp->point->h), 3, cvScalar(0, 255, 255, 0), 1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->valid_key_points.size(); i++){
		KeyPoint * dst_kp = dst->keypoints->valid_key_points.at(i);
		cvCircle(img_combine, cvPoint(dst_kp->point->w + width, dst_kp->point->h), 3, cvScalar(0, 255, 0, 0), 1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = dst->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine, cvPoint(kp->point->w + width, kp->point->h), 3, cvScalar(0, 255, 255, 0), 1, 8, 0);
	}

	for(unsigned int i = 0; i < matches.size() ; i++){
		KeyPoint * src_kp = matches.at(i).first;
		KeyPoint * dst_kp = matches.at(i).second;
		cvLine(img_combine,cvPoint(dst_kp->point->w  + width ,dst_kp->point->h),cvPoint(src_kp->point->w,src_kp->point->h),cvScalar(0, 0, 255, 0),1, 8, 0);
	}

	cvShowImage("combined image", img_combine);
	char buf[1024];
	sprintf(buf,"%i_%s.png",(int)matches.size(),path.c_str());
	if(!cvSaveImage(buf,img_combine)){printf("Could not save: %s\n",buf);}
	cvWaitKey(30);
	cvReleaseImage( &img_combine);
}
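
The per-pixel index arithmetic above can be replaced by ROI-based copies. A minimal sketch of the same side-by-side composition (the depth-hole highlighting is omitted here, so this is an illustration of the layout step only):

#include <opencv/cv.h>

// Compose two equally sized BGR images side by side with ROI copies
// instead of manual index arithmetic.
IplImage* composeSideBySide(IplImage* left, IplImage* right)
{
    IplImage* combine = cvCreateImage(cvSize(2 * left->width, left->height),
                                      IPL_DEPTH_8U, 3);
    cvSetImageROI(combine, cvRect(0, 0, left->width, left->height));        // left half
    cvCopy(left, combine);
    cvSetImageROI(combine, cvRect(left->width, 0, right->width, right->height)); // right half
    cvCopy(right, combine);
    cvResetImageROI(combine);
    return combine;
}
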
Code Example #19
File: main.cpp  Project: dalinhuang/iTRTest
int mainMatch(void)
{
  // Initialise capture device
  CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
  if(!capture) error("No Capture");

  // Declare Ipoints and other stuff
  IpPairVec matches;
  IpVec ipts, ref_ipts;

  // This is the reference object we wish to find in video frame
  // Replace the line below with IplImage *img = cvLoadImage("imgs/object.jpg");
  // where object.jpg is the planar object to be located in the video
  IplImage *img = cvLoadImage("imgs/object.jpg");
  if (img == NULL) error("Need to load reference image in order to run matching procedure");
  CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
  CvPoint dst_corners[4];

  // Extract reference object Ipoints
  surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
  drawIpoints(img, ref_ipts);
  showImage(img);

  // Create a window
  cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

  // Main capture loop
  while( true )
  {
    // Grab frame from the capture source
    img = cvQueryFrame(capture);

    // Detect and describe interest points in the frame
    surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);

    // Fill match vector
    getMatches(ipts,ref_ipts,matches);

    // This call finds where the object corners should be in the frame
    if (translateCorners(matches, src_corners, dst_corners))
    {
      // Draw box around object
      for(int i = 0; i < 4; i++ )
      {
        CvPoint r1 = dst_corners[i%4];
        CvPoint r2 = dst_corners[(i+1)%4];
        cvLine( img, cvPoint(r1.x, r1.y),
          cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
      }

      for (unsigned int i = 0; i < matches.size(); ++i)
        drawIpoint(img, matches[i].first);
    }

    // Draw the FPS figure
    drawFPS(img);

    // Display the result
    cvShowImage("OpenSURF", img);

    // If ESC key pressed exit loop
    if( (cvWaitKey(10) & 255) == 27 ) break;
  }

  // Release the capture device
  cvReleaseCapture( &capture );
  cvDestroyWindow( "OpenSURF" );
  return 0;
}
Code Example #20
File: main.cpp  Project: ytsutano/bookscan
int main(int argc, char **argv)
{
    // Configure left page.
    std::map<int, CvPoint2D32f> left_dst_markers;
    left_dst_markers[0] = cvPoint2D32f(0.00, 0.00);
    left_dst_markers[1] = cvPoint2D32f(6.00, 0.00);
    left_dst_markers[2] = cvPoint2D32f(6.00, 9.50);
    left_dst_markers[3] = cvPoint2D32f(0.00, 9.50);
    LayoutInfo left_layout;
    left_layout.page_left = 0.50;
    left_layout.page_top = 0.25;
    left_layout.page_right = 6.30;
    left_layout.page_bottom = 9.20;
    left_layout.dpi = 600.0;

    // Configure right page.
    std::map<int, CvPoint2D32f> right_dst_markers;
    right_dst_markers[4] = cvPoint2D32f(0.00, 0.00);
    right_dst_markers[5] = cvPoint2D32f(6.00, 0.00);
    right_dst_markers[6] = cvPoint2D32f(6.00, 9.50);
    right_dst_markers[7] = cvPoint2D32f(0.00, 9.50);
    LayoutInfo right_layout;
    right_layout.page_left = -0.30;
    right_layout.page_top = 0.25;
    right_layout.page_right = 5.50;
    right_layout.page_bottom = 9.20;
    right_layout.dpi = 600.0;

    // Process if an input image and two output paths are supplied; otherwise,
    // open a webcam for debugging.
    if (argc > 3) {
        IplImage *src_img = cvLoadImage(argv[1]);
        if (src_img == NULL) {
            std::cerr << "Failed to load the source image specified.\n";
            return 1;
        }

        BookImage book_img(src_img);

        IplImage *left_img
                = book_img.create_page_image(left_dst_markers, left_layout);
        if (left_img != NULL) {
            cvSaveImage(argv[2], left_img);
            cvReleaseImage(&left_img);
        }

        IplImage *right_img
                = book_img.create_page_image(right_dst_markers, right_layout);
        if (right_img != NULL) {
            cvSaveImage(argv[3], right_img);
            cvReleaseImage(&right_img);
        }

        cvReleaseImage(&src_img);
    } else {
        // Create windows.
        cvNamedWindow("Source", 0);
        cvResizeWindow("Source", 480, 640);

        left_layout.dpi = 100;
        right_layout.dpi = 100;

        // Open webcam.
        CvCapture* capture = cvCreateCameraCapture(0);
        if (!capture) {
            std::cerr << "Failed to load the camera device.\n";
            return 1;
        }
        const double scale = 1.0;
        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 1600 * scale);
        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 1200 * scale);

        while (cvWaitKey(10) < 0) {
            IplImage *src_img = cvQueryFrame(capture);
            cvShowImage("Source", src_img);
            process_image(src_img,
                    left_dst_markers, left_layout,
                    right_dst_markers, right_layout);
        }
    }

    return 0;
}
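
One caveat in the webcam branch above: cvSetCaptureProperty is only a request, and many camera drivers silently clamp or ignore it. A small sketch of reading the values back to learn the resolution actually in effect (the reporting helper is illustrative):

#include <opencv/highgui.h>
#include <iostream>

// Query what the driver actually applied after cvSetCaptureProperty calls.
static void reportCaptureSize(CvCapture* capture)
{
    double w = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    double h = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    std::cerr << "capture runs at " << w << "x" << h << "\n";
}
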
Code Example #21
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        printf("\n--Argument error!\n--Please specify input/output directory. ");
        printf("(e.g. AddNewFace.exe faces_to_find)\n");
        return -1;
    }
    //description
    printf("* This program is used to ADD NEW FACE images to be found.\n");
    printf("* Transfer face images to FEATURE DATA, and store in file 'feature.dat'.\n");
    printf("* Face image should be better named as PERSON's name.\n");
    printf("Press any key to continue...");
    getchar();

    //load face cascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    storage = cvCreateMemStorage(0);

    //find all images
    string strFilePath(argv[1]);
    strFilePath.append("\\*");
    WIN32_FIND_DATAA FileData;
    HANDLE hFind;
    hFind = FindFirstFileA(strFilePath.c_str(), &FileData);
    if (hFind == INVALID_HANDLE_VALUE)
    {
        printf ("\n--Invalid File Handle. GetLastError reports %d/n", GetLastError ());
        return -1;
    }
    //get image names
    vector<string> fileNames;
    do
    {
        //eliminate directories
        //if(!strcmp(FileData.cFileName,"..") || !strcmp(FileData.cFileName,"."))
        if(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)   // attributes form a bitmask, so test with '&'
            continue;
        fileNames.push_back(string(FileData.cFileName));
    } while (FindNextFileA(hFind, &FileData));
    //FindClose(&hFind);

    //file to store feature data
    ofstream featureDataFile;
    char dataFileName[256];
    sprintf(dataFileName, "%s\\%s", argv[1], feature_file_name);
    featureDataFile.open(dataFileName, ofstream::out);
    //class object of processing
    CFaceRecognize* pFaceRec = new CFaceRecognize;
    //process images
    for (size_t i = 0; i < fileNames.size(); i++)
    {
        string fileName = fileNames[i];
        int index = fileName.rfind(".");
        string name = fileName.substr(0, index);	//image name, i.e. person's name
        string extend = fileName.substr(index);		//file extension

        //skip feature data file
        if (extend == ".dat")
            continue;

        printf("\nprocessing image: %s.", fileName.c_str());

        //1--load image
        char filePath[256];
        sprintf(filePath, "%s\\%s", argv[1], fileName.c_str());
        IplImage* srcImg = cvLoadImage(filePath, 1);		//rgb
        IplImage* dstImg = 0;
        CvRect roi;
        if( srcImg )
        {
            //get key parts of face
            bool res = detect_and_draw( srcImg, roi);
            if (!res) { cvReleaseImage(&srcImg); continue; }   // release on failure to avoid leaking the frame

            //use roi to crop image
            dstImg = cvCreateImage(cvSize(roi.width,roi.height),srcImg->depth,srcImg->nChannels);
            cvSetImageROI(srcImg, roi);
            cvCopy(srcImg, dstImg);
            cvResetImageROI(srcImg);
            cvReleaseImage( &srcImg );
        }
        else
        {
            printf("--Error loading source image!\n");
            continue;
        }

        //2--standard image
        IplImage* standImage = pFaceRec->StandardImage(dstImg);
        cvShowImage("standard face",standImage);
        cvWaitKey(30);

        //3--generate LGBPHS data
        vector<int> LGBPHist;
        pFaceRec->GetLGBPHS(standImage, LGBPHist);

        //4--write into feature data file (comma-separated histogram, one line per person)
        featureDataFile<<name<<":";
        for (size_t imm = 0; imm < LGBPHist.size(); imm++)   // guard: the original underflowed on an empty histogram
            featureDataFile<<LGBPHist[imm]<<(imm + 1 < LGBPHist.size() ? "," : "");
        featureDataFile<<"\n";

        cvReleaseImage(&dstImg);   // release the cropped face once its features are written
    }

    cvReleaseMemStorage(&storage);

    printf("\n\nAll finished...\n");
    //system("pause");
    return 0;
}
Code Example #22
File: search.c  Project: vstabile/RA
int main( int argc, char** argv )
{
  IplImage* img1, * img2;
  struct feature* feat1, * feat2, * feat;
  struct feature** nbrs;
  struct kd_node* kd_root;
  CvPoint pt1, pt2;
  double d0, d1;
  int n1, n2, k, i, m = 0;

  if( argc != 3 )
    fatal_error( "usage: %s <img1> <img2>", argv[0] );
  
  img1 = cvLoadImage( argv[1], 1 );
  if( ! img1 )
    fatal_error( "unable to load image from %s", argv[1] );
  img2 = cvLoadImage( argv[2], 1 );
  if( ! img2 )
    fatal_error( "unable to load image from %s", argv[2] );

  fprintf( stderr, "Finding features in %s...\n", argv[1] );
  n1 = sift_features( img1, &feat1 );
  fprintf( stderr, "Finding features in %s...\n", argv[2] );
  n2 = sift_features( img2, &feat2 );
  fprintf( stderr, "Building kd tree...\n" );
  kd_root = kdtree_build( feat2, n2 );
  for( i = 0; i < n1; i++ )
    {
      feat = feat1 + i;
      k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
      if( k == 2 )
	{
	  d0 = descr_dist_sq( feat, nbrs[0] );
	  d1 = descr_dist_sq( feat, nbrs[1] );
	  if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
	    {
	      pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
	      pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
	      pt2.y += img1->height;
	      m++;
	      feat1[i].fwd_match = nbrs[0];
	    }
	}
      free( nbrs );
    }

  fprintf( stderr, "Found %d total matches\n", m );
  cvWaitKey( 0 );   /* note: with no HighGUI window open this typically returns immediately */

  /* 
     UNCOMMENT BELOW TO SEE HOW RANSAC FUNCTION WORKS
     
     Note that this line above:
     
     feat1[i].fwd_match = nbrs[0];
     
     is important for the RANSAC function to work.
  */
  /*
  {
    CvMat* H;
    IplImage* xformed;
    H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01,
		      homog_xfer_err, 3.0, NULL, NULL );
    if( H )
      {
	xformed = cvCreateImage( cvGetSize( img2 ), IPL_DEPTH_8U, 3 );
	cvWarpPerspective( img1, xformed, H, 
			   CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS,
			   cvScalarAll( 0 ) );
	cvNamedWindow( "Xformed", 1 );
	cvShowImage( "Xformed", xformed );
	cvWaitKey( 0 );
	cvReleaseImage( &xformed );
	cvReleaseMat( &H );
      }
  }
  */

  cvReleaseImage( &img1 );
  cvReleaseImage( &img2 );
  kdtree_release( kd_root );
  free( feat1 );
  free( feat2 );
  return 0;
}
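
The `d0 < d1 * NN_SQ_DIST_RATIO_THR` test above is Lowe's ratio test applied to squared descriptor distances: a putative match is kept only when the nearest neighbor n0 is clearly closer than the runner-up n1. In LaTeX form (tau^2 is the squared-ratio threshold; the constant's actual value is defined elsewhere in this project):

\[
\text{accept } f \leftrightarrow n_0
\iff
\frac{d(f, n_0)^2}{d(f, n_1)^2} < \tau^2,
\qquad
d(f, n) = \lVert \mathrm{descr}(f) - \mathrm{descr}(n) \rVert_2
\]
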
Code Example #23
File: main.cpp  Project: iaml/Video-Stylization
int main(int argc, char *argv[])
{
    if (argc < 6)
    {
        printf("Usage: image mask start end output\n");
        return -1;
    }

    IplImage *srcMask, *dstMask;
    char *imgPattern = argv[1];
    char *maskPattern = argv[2];
    int s = atoi(argv[3]);
    int e = atoi(argv[4]);
    char *output = argv[5];
    char name[256];
    int imgCount;
    BiImage sift1, sift2, src, dst, tmp;
    
    snprintf(name, 256, imgPattern, s);
    ImageIO::LoadImage(name, src);
    ExtractSIFT(src, sift1);

    snprintf(name, 256, maskPattern, s);
    srcMask = cvLoadImage(name, CV_LOAD_IMAGE_GRAYSCALE);

    int width = src.width();
    int height = src.height();
    int channels = src.nchannels();
    IntImage fv(0, width, height, 2);

    for (imgCount = s + 1; imgCount <= e; imgCount++)
    {
        snprintf(name, 256, imgPattern, imgCount);
        printf("processing %s...\n", name);

        ImageIO::LoadImage(name, dst);
        ExtractSIFT(dst, sift2);

        snprintf(name, 256, maskPattern, imgCount);
        dstMask = cvLoadImage(name, CV_LOAD_IMAGE_GRAYSCALE);

        // layer flow
        snprintf(name, 256, "%s_%d", output, imgCount);
        LayerFlow(sift1, sift2, fv, srcMask, dstMask, name);

        // save flow vector to image
        tmp.setValue(0, width, height, channels);
        FlowColorImage(fv, tmp);
        
        snprintf(name, 256, "%s_fv_%d.bmp", output, imgCount);
        ImageIO::WriteImage(name, tmp);
        tmp.clear();
        
        // save flow vector to file
        snprintf(name, 256, "%s_fv_%d.dat", output, imgCount);
        SaveAsFile(fv.data(), width, height, 2, name);

        // warp image
        tmp.setValue(0, width, height, 3);
        WarpImage(src, fv, tmp);
        snprintf(name, 256, "%s_warp_nobg_%d.bmp", output, imgCount);
        ImageIO::WriteImage(name, tmp);
        tmp.clear();
        
        WarpImage(src, fv, dst);
        src = dst;
        snprintf(name, 256, "%s_warp_%d.bmp", output, imgCount);
        ImageIO::WriteImage(name, src);
        dst.clear();
        
        // warp sift image
        WarpImage(sift1, fv, sift2);
        sift1 = sift2;
        sift2.clear();
        
        cvReleaseImage(&srcMask);
        srcMask = cvCloneImage(dstMask);
        cvReleaseImage(&dstMask);
    }

    cvReleaseImage(&srcMask);

	return 0;
}
Code Example #24
File: Exercise5.3.c  Project: CarlChenCC/examples
int main (int argc, const char * argv[])
{
	if ( argc != 2 ) {
		fprintf(stderr, "Expected image filename.\n");
		exit(1);
	}
	
	IplImage* image = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
	
	if ( image == NULL ) {
		fprintf(stderr, "Couldn't load file %s\n", argv[1]);
		exit(1);
	}
	
	// part a
	IplImage* sigma1 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	IplImage* sigma4 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	IplImage* sigma6 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	
	cvSmooth(image, sigma1, CV_GAUSSIAN, 9, 9, 1, 0);
	cvSmooth(image, sigma4, CV_GAUSSIAN, 9, 9, 4, 0);
	cvSmooth(image, sigma6, CV_GAUSSIAN, 9, 9, 6, 0);
	
	cvNamedWindow("Original", CV_WINDOW_NORMAL);
	cvShowImage("Original", image);
	cvNamedWindow("asigma1", CV_WINDOW_NORMAL);
	cvShowImage("asigma1", sigma1);
	cvNamedWindow("asigma4", CV_WINDOW_NORMAL);
	cvShowImage("asigma4", sigma4);
	cvNamedWindow("asigma6", CV_WINDOW_NORMAL);
	cvShowImage("asigma6", sigma6);
	cvWaitKey(0);
	
	// part b
	IplImage* sigma1_2 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	IplImage* sigma4_2 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	IplImage* sigma6_2 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	cvSmooth(image, sigma1_2, CV_GAUSSIAN, 0, 0, 1, 0);
	cvSmooth(image, sigma4_2, CV_GAUSSIAN, 0, 0, 4, 0);
	cvSmooth(image, sigma6_2, CV_GAUSSIAN, 0, 0, 6, 0);
	cvNamedWindow("bsigma12", CV_WINDOW_NORMAL);
	cvShowImage("bsigma12", sigma1_2);
	cvNamedWindow("bsigma42", CV_WINDOW_NORMAL);
	cvShowImage("bsigma42", sigma4_2);
	cvNamedWindow("bsigma62", CV_WINDOW_NORMAL);
	cvShowImage("bsigma62", sigma6_2);
	cvWaitKey(0);
	
	// part c
	IplImage* csigma1_9 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	cvSmooth(image, csigma1_9, CV_GAUSSIAN, 0, 0, 1, 9);
	cvNamedWindow("csigma1_9", CV_WINDOW_NORMAL);
	cvShowImage("csigma1_9", csigma1_9);
	cvWaitKey(0);
	
	// part d
	IplImage* csigma9_1 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	cvSmooth(image, csigma9_1, CV_GAUSSIAN, 0, 0, 9, 1);
	cvNamedWindow("csigma9_1", CV_WINDOW_NORMAL);
	cvShowImage("csigma9_1", csigma9_1);
	cvWaitKey(0);
	
	// part e
	IplImage* esigma = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	cvSmooth(image, esigma, CV_GAUSSIAN, 0, 0, 1, 9);
	cvSmooth(esigma, esigma, CV_GAUSSIAN, 0, 0, 9, 1);
	cvNamedWindow("esigma", CV_WINDOW_NORMAL);
	cvShowImage("esigma", esigma);
	cvWaitKey(0);
	
	// part f
	IplImage* fsigma = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
	cvSmooth(image, fsigma, CV_GAUSSIAN, 9, 9, 0, 0);
	cvNamedWindow("fsigma", CV_WINDOW_NORMAL);
	cvShowImage("fsigma", fsigma);
	cvWaitKey(0);

	// Should call cvReleaseImage if this wasn't going to exit.
	
    return 0;
}
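
Two Gaussian properties explain what parts a-f demonstrate: the 2-D kernel separates into 1-D passes, and cascading two Gaussian blurs adds variances:

\[
G_{\sigma}(x, y) = g_{\sigma}(x)\, g_{\sigma}(y),
\qquad
G_{\sigma_1} * G_{\sigma_2} = G_{\sqrt{\sigma_1^2 + \sigma_2^2}}
\]

So part e (sigma 1 then 9 horizontally, and 9 then 1 vertically) approximates a single isotropic blur with sigma = sqrt(1^2 + 9^2) ~ 9.06 per axis.
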
Code Example #25
//--------THIS FUNCTION PERFORMS A TEMPLATE MATCH ON THE ORIGINAL IMAGE TO CROP THE IMAGE FROM BACKGROUND-----------
//-------IN ORDER TO IDENTIFY THE REGION WHERE THE NOTE IS PRESENT----------------
float NotetplMatch(IplImage *img1)
{

	
	//------------NotetplProcessed STORES THE IMAGE EXTRACTED FROM BACKGROUND-------------
	printf("\nNoteTplMatch");
	NotetplProcessed=cvCreateImage(cvGetSize(img1),8,3);

	//-------VARIABLES USED FOR TEMPLATE MATCHING----------
	IplImage *img;
	IplImage	*tpl;
	IplImage	*res;
	CvPoint		minloc, maxloc;
	double		minval, maxval;
	int			img_width, img_height;
	int			tpl_width, tpl_height;
	int			res_width, res_height;
	float maxi=-1.0;
	int index=0;
	//cvNamedWindow( "reference", CV_WINDOW_AUTOSIZE );
	//cvNamedWindow( "template", CV_WINDOW_AUTOSIZE );
	//cvNamedWindow("result",CV_WINDOW_AUTOSIZE);
	
	//-------TEMPORARY IMAGE-COLOR--------------
	img=cvCreateImage(cvGetSize(img1),8,3);

	//-----------THIS LOOP MATCHES THE IMAGE WITH THE DIFFERENT TEMPLATES AS STORED IN THE GLOBAL 
	// (continued) VARIABLE Notetpl[] (the loop covers indices 0..7)----------------
	for(int km=0;km<8;km++)
	{
		cvCopy(img1,img,NULL);
		tpl = cvLoadImage(Notetpl[km]);
		img_width  = img->width;
		img_height = img->height;
		tpl_width  = tpl->width;
		tpl_height = tpl->height;
		res_width  = img_width - tpl_width + 1;
		res_height = img_height - tpl_height + 1;


		/* create new image for template matching computation */
		res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
		/* choose template matching method to be used */
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF );
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF );
		cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );
		cvMinMaxLoc( res, &minval, &maxval,&minloc,&maxloc, 0);
		/* draw red rectangle */
		cvRectangle( img, 
				 cvPoint( maxloc.x, maxloc.y ), 
				 cvPoint( maxloc.x + tpl_width, maxloc.y + tpl_height ),
				 cvScalar( 0, 0, 255, 0 ), 1, 0, 0 );	
		CvScalar s;
			
	//	printf("\nminval= %f  maxval= %f  ",minval,maxval);

		//------WE ONLY NEED THE RIGHT HALF OF THE NOTE (WHERE GANDHIJI IS PRESENT), SO THIS LOOP
		// (continued) KEEPS THAT AREA IN FOCUS AND BLACKENS THE REST--------
		for(int i=1;i<img_height;i++)
		{
			for(int j=1;j<img_width;j++)
			{

				if((j<maxloc.x+(tpl_width/2) || j>maxloc.x + tpl_width)  || (i<maxloc.y || i>maxloc.y + tpl_height))
				{	 
					s = cvGet2D(img, i, j);
					s.val[0]=0.0;
					s.val[1]=0.0;
					s.val[2]=0.0;
				
					cvSet2D(img, i, j, s );

				}
			

			}

		}

		//---------KEEPS THE BEST-MATCHING OF THE TEMPLATES COMPARED SO FAR--------
		if(maxval>maxi)
		{
				maxi=maxval;
				cvCopyImage(img,NotetplProcessed);
				
		}
	

		//cvShowImage( "reference", img );
		//cvShowImage( "template", tpl );
		//cvShowImage("result",res);
		//cvWaitKey(100);	

		cvReleaseImage( &tpl );   // release per-iteration images so the loop does not leak
		cvReleaseImage( &res );
	}
	//printf("\n\nWaiting for next image to load\n");
	//cvDestroyWindow( "reference" );
	//cvDestroyWindow( "template" );
	//cvDestroyWindow( "result" );


	return maxi;
}
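
CV_TM_CCOEFF_NORMED, used above, yields scores in [-1, 1] with 1 a perfect match, which is why comparing maxval across templates (as maxi does) is meaningful. A minimal standalone sketch of just that scoring step (names are illustrative):

#include <opencv/cv.h>

// Score one template against one image with the normalized correlation coefficient.
double matchScore(IplImage* img, IplImage* tpl)
{
    int rw = img->width  - tpl->width  + 1;
    int rh = img->height - tpl->height + 1;
    IplImage* res = cvCreateImage(cvSize(rw, rh), IPL_DEPTH_32F, 1);

    cvMatchTemplate(img, tpl, res, CV_TM_CCOEFF_NORMED);

    double minval, maxval;
    CvPoint minloc, maxloc;
    cvMinMaxLoc(res, &minval, &maxval, &minloc, &maxloc, 0);

    cvReleaseImage(&res);
    return maxval;   // in [-1, 1]; higher means a better match, located at maxloc
}
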
Code Example #26
void Volume::loadPGMData(char *path, int firstSlice, int lastSlice) 
{

	int maxSize = lastSlice;

	IplImage *img = 0;
	IplImage *smallerImg = cvCreateImage(cvSize(maxSize, maxSize), IPL_DEPTH_8U, 3);

	char currentPath[1000];
	char imageNumber[10];
	int imageStep, grayValue, indexGlobalImage;
	
	for(int image = firstSlice; image <= lastSlice; image++)
	{

		if(image < 10)
			sprintf(imageNumber, "-%s%d", "000", image);
		else if(image < 100)
			sprintf(imageNumber, "-%s%d", "00", image);
		else if(image < 1000)
			sprintf(imageNumber, "-%s%d", "0", image);
		sprintf(currentPath, "%s%s.pgm", path, imageNumber);
		
		img = cvLoadImage(currentPath);
		if(!img)
			continue;   // skip missing or unreadable slices
		//cvResize(img, smallerImg);
		//img = smallerImg;

		if(image == firstSlice) 
		{
			width = img->width;
			height = img->height;
			depth = (lastSlice - firstSlice + 1);
			data = new unsigned char[width * height * depth * 4];
			imageStep = width * height * 4;
			/*
			volumeData.resize(width);
			for(int x = 0; x < width; x++) {
				volumeData[x].resize(height);
				for(int y = 0; y < height; y++)
					volumeData[x][y].resize(depth);
			}
			*/
		}

		int x, y;
		// NOTE: indexing with pixel*3 assumes rows are densely packed (no widthStep padding)
		for(int pixel = 0; pixel < img->width * img->height; pixel++)
		{
			x = pixel % width;
			y = pixel / width;
			grayValue = (unsigned char)img->imageData[pixel * 3 + 0]; // cast: imageData is signed char
			float opacity = (float)(grayValue/255.f);
			//volumeData[x][y][image - firstSlice] = grayValue;
			// store premultiplied RGB with the raw gray value as alpha
			data[(image - firstSlice) * imageStep + pixel * 4 + 0] = grayValue * opacity;
			data[(image - firstSlice) * imageStep + pixel * 4 + 1] = grayValue * opacity;
			data[(image - firstSlice) * imageStep + pixel * 4 + 2] = grayValue * opacity;
			data[(image - firstSlice) * imageStep + pixel * 4 + 3] = grayValue;
		}

		cvReleaseImage(&img);   // release each slice after copying it into the volume
	}

	cvReleaseImage(&smallerImg);
}
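
The inner loop stores each voxel as premultiplied RGBA: the gray value serves both as the color and, scaled to [0, 1], as the opacity it is multiplied by. Per voxel, with v the gray value in 0..255:

\[
\alpha = \frac{v}{255},
\qquad
(R, G, B, A) = (v\alpha,\; v\alpha,\; v\alpha,\; v)
\]

Dark voxels thus become both dim and transparent, the usual premultiplied-alpha convention for back-to-front volume compositing.
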
Code Example #27
int main(int argc, char* argv[])
{
	if(argc!=2){
		fprintf(stderr, "Usage: %s <cascade-name>\n", argv[0]);
		exit(1);
	}

	int i;
	char c;

	system("ls ./Image > listImages.txt");
	FILE* file = fopen("./listImages.txt","r");

	char line[64] = "";
	char nameImage[64] = "";
	char pathImage[64] = "";
	char pathXML[64] = "";

	IplImage* frame = NULL;   // frames are allocated by cvLoadImage inside the loop (the original pre-allocation leaked)

	sprintf(pathXML,"./XML/%s.xml",argv[1]);

	cv::CascadeClassifier *classifier;
	classifier = new cv::CascadeClassifier();
	classifier->load(pathXML);
	std::vector<cv::Rect> rect;

	if(file != NULL){
		while(fgets(line,64,file) != NULL) {
			
			for(i=0;i<64;i++)
				nameImage[i] = '\0';
		
			i = 0;
			c = line[i];
			while(c!=10){ 				//end of line
				nameImage[i] = line[i];
				c = line[++i];
			}		
		
			sprintf(pathImage,"./Image/%s",nameImage);

			if(frame) cvReleaseImage(&frame);   // free the previous frame before loading the next
			frame = cvLoadImage(pathImage);
			if(!frame) continue;
		
			classifier->detectMultiScale(frame,rect,1.1,3,1);

			for(i = 0; i<rect.size(); i++){
				CvPoint pt1 = { rect[i].x, rect[i].y };
				CvPoint pt2 = { rect[i].x + rect[i].width, rect[i].y + rect[i].height };
				cvRectangle(frame, pt1, pt2, CV_RGB(255,0,0), 1, 1, 0);
			}
			cvShowImage("image",frame);
		
			c = cvWaitKey(0);
		}
		fclose(file);
	}

	cvDestroyAllWindows();
	
	return 0;
}
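
For reference, the detectMultiScale call above takes (image, results, scaleFactor, minNeighbors, flags). A hedged sketch of the conventional preprocessing around it (cascades are trained on grayscale, and histogram equalization tends to make them more robust to lighting; the helper below is illustrative, not from this project):

#include <opencv2/opencv.hpp>

std::vector<cv::Rect> detectFaces(cv::CascadeClassifier& classifier,
                                  const cv::Mat& bgr)
{
    cv::Mat gray;
    cv::cvtColor(bgr, gray, CV_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> found;
    // scaleFactor=1.1 (pyramid step), minNeighbors=3 (merge threshold), flags=1
    classifier.detectMultiScale(gray, found, 1.1, 3, 1);
    return found;
}
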
Code Example #28
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile || gender || age || recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out outer faces
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{
				align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if(probBlink > blink_threshold )
					bBlink = 1;  //eye close
				else 
					bBlink = -1; //eye open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if(probSmile > smile_threshold)
					bSmile = 1;  //smile
				else 
					bSmile = -1; //not smile

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// we only do recognition every 5 frames,interval
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = -1;   // -1 = not recognized; assigned inside the recognition block below
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id = -1;   // do not redeclare here: a local would shadow the outer face_id used by the log below
				if(bEnableAutoCluster & !is_piclist)
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold*1.0)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id =-1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); //dispaly score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N\A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; //face_trackid. don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// frame counter
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buff 
	
	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
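
The repeated voteLabel(face_trackid, label) calls above smooth noisy per-frame predictions over a track. The cxlib implementation is not shown here; the following is a minimal sketch of the kind of per-track sliding-window majority vote it presumably performs (all names and the window size are illustrative assumptions, not the library's actual code):

#include <map>
#include <deque>

class LabelVoter {
public:
    explicit LabelVoter(size_t window = 15) : window_(window) {}

    // Record this frame's label for the track, return the majority label
    // over the most recent `window_` frames of that track.
    int vote(int trackId, int label)
    {
        std::deque<int>& h = history_[trackId];
        h.push_back(label);
        if (h.size() > window_) h.pop_front();

        std::map<int, int> counts;
        for (size_t i = 0; i < h.size(); ++i) counts[h[i]]++;

        int best = -1, bestCount = 0;
        for (std::map<int, int>::const_iterator it = counts.begin(); it != counts.end(); ++it)
            if (it->second > bestCount) { best = it->first; bestCount = it->second; }
        return best;   // most frequent recent label for this track
    }

private:
    size_t window_;
    std::map<int, std::deque<int> > history_;
};
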
Code Example #29
File: mainwindow.cpp  Project: yajunyang/MainWindow
void MainWindow::level_set()
{
    IplImage * image=cvLoadImage("E:/yang.jpg",1);// 声纳.jpg    1-28.BMP
    CvSize size=cvGetSize(image);
    int i,j;
    IplImage * image_gray_a=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,1);
    IplImage * display1=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,1);
    IplImage * display2=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,1);
    IplImage * display3=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,1);
    IplImage * display_contour=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,3);
    IplImage * imgDisplay=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,3);

    IplImage * image32=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * imgGauss=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * image_laplace=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * imgG=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * imgU=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * imgU_temp=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * Ix=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * Iy=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);
    IplImage * Edge=cvCreateImage(cvGetSize(image),IPL_DEPTH_32F,1);


    int iterNum = 850;

    cvCvtColor(image,image_gray_a,CV_BGR2GRAY);
    cvConvertScale(image_gray_a,image32,1,0);
    cvSmooth(image32,imgGauss,CV_GAUSSIAN,0,0,1.5,0);

    Sobel(imgGauss,Ix,Iy);

    CvScalar cur,cur1,cur2;
    for (i=0; i<size.height; i++)
    {
        for(j=0; j<size.width; j++)
        {
            cur1 = cvGet2D(Ix,i,j);
            cur2 = cvGet2D(Iy,i,j);
            cur.val[0] = 1.0/(1.0+cur1.val[0]*cur1.val[0]+cur2.val[0]*cur2.val[0]);
            cvSet2D(imgG,i,j,cur);
        }
    }

    int w=15;
    int w2=0;
    double c0=14.0;
    //define initial level set function (LSF) as -c0, 0, c0 at points outside, on
    //the boundary, and inside of a region R, respectively.
    for (i=0; i<size.height; i++)
    {
        for(j=0; j<size.width; j++)
        {
            if (i<w || i>size.height-w-1 || j<w || j>size.width-w-1)
            {
                cur.val[0] = c0;
            }
            else if (i>w && i<size.height-w-1 && j>w && j<size.width-w-1)
            {
                cur.val[0] = -c0;
            }
            else cur.val[0] = 0;
            // Note: this can be commented out. The intial LSF does NOT necessarily need a zero level set.
            cvSet2D(imgU,i,j,cur);
        }
    }

    //ImgDraw(image, imgU, imgDisplay);
    //cvNamedWindow("LevelSet");
    //cvShowImage("LevelSet",imgDisplay);
    //cvWaitKey(0);
    double epsilon=1.5;//1.5
    double timestep=7;//7
    double lambda=10;//5
    double mu=0.2/timestep;
    double alf=10.5;//1.5
    for (int k=0;k<iterNum;k++)
    {
        Evolution2(imgU,imgG,lambda,mu,alf,epsilon,timestep,1);

        if (k%20==0)
        {
            ImgDraw(image, imgU, imgDisplay);
            cvShowImage("LevelSet",imgDisplay);
            cvWaitKey(20);
        }
    }

    cvWaitKey(0);

    cvDestroyAllWindows();
    cvReleaseImage(&image_gray_a);
    cvReleaseImage(&display1);
    cvReleaseImage(&display2);
    cvReleaseImage(&display3);
    cvReleaseImage(&display_contour);
    cvReleaseImage(&imgDisplay);
    cvReleaseImage(&Ix);
    cvReleaseImage(&Iy);
    cvReleaseImage(&imgG);
    cvReleaseImage(&imgU);
    cvReleaseImage(&imgU_temp);
    cvReleaseImage(&imgGauss);
    cvReleaseImage(&image32);
    cvReleaseImage(&image_laplace);
    cvReleaseImage(&image);
}
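
The Sobel/imgG loop above evaluates the standard edge-indicator function of geodesic active contours on the Gaussian-smoothed image, and the w-border loop builds a binary-step initial level set. With sigma = 1.5, w = 15, c0 = 14 as in the code:

\[
g = \frac{1}{1 + \lvert \nabla (G_{\sigma} * I) \rvert^{2}},
\qquad
\phi_0(x) =
\begin{cases}
 c_0 & x \text{ within } w \text{ pixels of the border},\\
 -c_0 & \text{otherwise}
\end{cases}
\]

(The exact-boundary value 0 in the code is optional, per its own comment: the initial LSF need not contain a zero level set.)
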
Code Example #30
File: LightStabilizer.cpp  Project: kumarasn/tracker
void LightStabilizer::runAmbientDiagnostic(CamHandler *cam,FilterHandler*filterHandler)
{
	/*
	 * Capture a series of frames and gather statistics such as the number of
	 * pixels similar to skin color, etc.
	 */


/*	int totalSkinPixels = 0;

	int skinPixels;

	cvWaitKey(2000); // let the lens stabilize

	for ( int i = 0; i < max_frames_to_analize; i++ ){

		IplImage *image = cvQueryFrame(cam->getCam());

		filterHandler->getSkinImage(image);

		skinPixels = filterHandler->getSkinCount();

		totalSkinPixels = totalSkinPixels + skinPixels;

	}

	averageSkinPixels = totalSkinPixels / max_frames_to_analize;
*/


	IplImage* image;

	averageSkinPixels = 0;


	cam->showFrame(cam->getFirstWindow(),cvLoadImage("Images\\showHandOpen.jpg",-1));

	cvWaitKey(-1);

	cam->retrieveFrame();   // presumably discards a stale buffered frame before sampling

	image = cam->retrieveFrame();

	filterHandler->getSkinImage(image);

	int skinHandOpen = filterHandler->getSkinCount();


	cam->showFrame(cam->getSecondWindow(),image);


	cam->showFrame(cam->getFirstWindow(),cvLoadImage("Images\\showHandClose.jpg",-1));

	cvWaitKey(-1);


	image = cam->retrieveFrame();   // first retrieve presumably flushes a stale frame; the second is the sample

	image = cam->retrieveFrame();

	filterHandler->getSkinImage(image);

	int skinHandClose = filterHandler->getSkinCount();



	float dif = (float)skinHandOpen / (float)skinHandClose;

	if ( dif < 1.01 ){

		logger->addEntry(componentName,"Hand analysis failed. Using proportional values");

		skinHandOpen = (skinHandOpen < skinHandClose) ? skinHandClose : skinHandOpen;   // keep the larger count (the original repeated skinHandClose in both branches)

		skinHandClose = skinHandOpen * 0.75;

	}

	skinThreshold = ((skinHandOpen - skinHandClose) * threshold_delta) + skinHandClose - averageSkinPixels;


	std::string s;
	std::stringstream out;
	out << skinHandOpen;
	s = out.str();

	logger->addEntry(componentName,"Skin pixels in Open Hand: " + s);


	std::stringstream outt;
	outt << skinHandClose;
	s = outt.str();

	logger->addEntry(componentName,"Skin pixels in Close Hand: " + s);


	std::stringstream outtt;
	outtt << threshold_delta;
	s = outtt.str();

	logger->addEntry(componentName,"Threshold Delta " + s);


	std::stringstream outttt;
	outttt << skinThreshold;
	s = outttt.str();

	logger->addEntry(componentName,"Threshold " + s);


}
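
To make the threshold formula concrete, a worked example with illustrative numbers: with skinHandOpen = 1200 pixels, skinHandClose = 800, threshold_delta = 0.5, and averageSkinPixels = 0 (as set above), skinThreshold = (1200 - 800) * 0.5 + 800 - 0 = 1000. The open/close decision boundary therefore sits at the delta-weighted point between the two calibration counts.
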