int main( int argc, char** argv )
{
	IplImage *imageOut = 0;
	IplImage *frame1,*frame2;
	int B = 0;	// border width, in pixels, around and between the two videos

	if( argc < 4 ) {
		printf("Usage: %s <inputVideo1> <inputVideo2> <outputVideo>\n", argv[0]);
		return 1;
	}
	capture1 = cvCaptureFromAVI( argv[1] );
	capture2 = cvCaptureFromAVI( argv[2] );
	if( !capture1 || !capture2 ) {
		printf("Couldn't open an input video.\n");
		return 1;
	}
	
	FPS1 = cvRound( cvGetCaptureProperty(capture1, CV_CAP_PROP_FPS) );
	FPS2 = cvRound( cvGetCaptureProperty(capture2, CV_CAP_PROP_FPS) );
	
	FPS = max(FPS1, FPS2);	// Use the fastest video speed.

	frame1 = cvQueryFrame( capture1 );
	frame2 = cvQueryFrame( capture2 );

	movieWidth1 = frame1->width;
	movieHeight1 = frame1->height;
	movieWidth2 = frame2->width;
	movieHeight2 = frame2->height;
	printf("Got a video source 1 with a resolution of %dx%d at %d fps.\n", movieWidth1, movieHeight1, FPS1);
	printf("Got a video source 2 with a resolution of %dx%d at %d fps.\n", movieWidth2, movieHeight2, FPS2);

	width = B + max(movieWidth1, movieWidth2) + B;
	height = B + movieHeight1 + B + movieHeight2 + B;
	
	CvSize size = cvSize(width, height);
	imageOut = cvCreateImage( size, 8, 3 );	// 8-bit, 3-channel (BGR) image
	imageOut->origin = frame1->origin;
	printf("Combining the videos into a resolution of %dx%d at %d fps.\n", width, height, FPS);

	int fourCC_code = CV_FOURCC('M','J','P','G');	// M-JPEG codec (apparently isn't very reliable)
	int isColor = 1;				
	printf("Storing stabilized video into '%s'\n", argv[3]);
	videoWriter = cvCreateVideoWriter(argv[3], fourCC_code, FPS, size, isColor);
	
    cvNamedWindow( "CombineVids", 1 );
    //cvNamedWindow( "CombineVids", 0 );
    cvResizeWindow("CombineVids",1000,600);

    while (1)
    {
		frame1 = cvQueryFrame( capture1 );
		frame2 = cvQueryFrame( capture2 );
		if( !frame1 && !frame2 )
			break;
		else if( !frame1 )	// one video ended early: reuse the other's frame
			frame1 = frame2;
		else if( !frame2 )
			frame2 = frame1;

		cvSetImageROI( imageOut, cvRect(B, B, movieWidth1, movieHeight1) );
		cvCopy( frame1, imageOut, NULL );
		cvSetImageROI( imageOut, cvRect(B, B + movieHeight1 + B, movieWidth2, movieHeight2) );
		cvCopy( frame2, imageOut, NULL );		

		cvResetImageROI( imageOut );
		cvWriteFrame(videoWriter, imageOut);      
		cvShowImage( "CombineVids", imageOut );		

   		int c = cvWaitKey( FPS > 0 ? 1000/FPS : 30 );	// pace the display to the output frame rate
		if( (char)c == 27 )	
            break;	
	}

	cvReleaseVideoWriter( &videoWriter );
	cvReleaseCapture( &capture1 );
	cvReleaseCapture( &capture2 );
	cvDestroyWindow( "CombineVids" );
	return 0;
}
// Function for Computing Word Segmentation
// Inputs :-
//			(IplImage*) binImg : Pointer to the 1-Channel Binary Image
//          (int*) numWord : Will receive the number of words in the text line
//          (IplImage**) wordImg : An array of IplImages; entries 1..numWord each hold one word of text
// Outputs :-
//			(unsigned char) errCode : The Error Code of Execution
// Invoked As : errCode = computeWordSegmentation( binImg, &numWord, wordImg );
unsigned char computeWordSegmentation( IplImage* binImg , int* numWord, IplImage** wordImg)
{
	// Check Inputs
	if( (binImg == NULL) || (binImg->imageSize <= 0) || (binImg->nChannels != 1) )
	{
		// Error Code 1: Invalid Input
		return(1);
	}

	// Word Segmentation
	int th1 = 20;	// minimum word width, in pixels (may depend on font)
	int baseIndx = -1;
	int widthStep = binImg->widthStep;
	int count=0,flag=0, start=0,end=0;
	for(int x=0 ; x< (binImg->width) ; ++x)
	{
		// update base index
		baseIndx = baseIndx + 1 ;
		
		int currIndx = baseIndx - widthStep;

		flag=0;
		for(int y =0 ; y < (binImg->height) ; ++y )
		{
			currIndx = currIndx +widthStep;
			
			if( binImg->imageData[currIndx] < 0 )	// foreground pixel: imageData is signed char, so 255 reads as -1
			  {
				  flag=1;
				  break;
			  }			
		}
		if( flag==0 || (x == binImg->width - 1) )	// blank column, or the last column
		{
			if(flag==1)
				end = binImg->width -1;
			else
				end = x-1;
			if( end - start > th1 )
			{
				count++;	// note: word images are stored at indices 1..numWord
				// Allocate Memory for the Word Image
				wordImg[count] = cvCreateImage( cvSize( end-start + 2 , binImg->height) , IPL_DEPTH_8U , 1 );
				int bindex = -binImg->widthStep;
				// nwidthStep rounds (end-start+1) up to the next multiple of 4,
				// matching the 4-byte-aligned widthStep of the image just created
				int nwidthStep = end - start + 1 + 4 - (end-start + 1)%4;
				
				int bindex1 = -nwidthStep;
				for(int l=0;l< binImg->height;l++)
				{
					bindex = bindex + binImg->widthStep;
					bindex1 = bindex1 + nwidthStep;
					int cindex = bindex + start - 1;
					int cindex1 = bindex1 - 1; 
					for(int m=0;m<nwidthStep;m++)
					{
						cindex = cindex + 1;
						cindex1 = cindex1 + 1;
						wordImg[count]->imageData[cindex1] = binImg->imageData[cindex]; 
						
					}
				
				}
				
			}
			start = x + 1;	// the next candidate segment begins after this blank column
		}
	
	}
		
	*numWord = count;
    // Error Code 0 : All well
    return( 0 );	
}
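A minimal usage sketch per the "Invoked As" note above; the image path and the
array capacity of 64 are assumptions, and the input must be a 1-channel binary
image with text stored as 255 on a black background (the code tests imageData < 0):

	IplImage* lineBin = cvLoadImage( "line.png", CV_LOAD_IMAGE_GRAYSCALE );	// assumed path
	IplImage* wordImg[64] = { 0 };	// assumed capacity; words land at indices 1..numWord
	int numWord = 0;
	unsigned char errCode = computeWordSegmentation( lineBin, &numWord, wordImg );
	if( errCode == 0 )
		printf( "%d words found\n", numWord );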
// Function for Computing Line Segmentation
// Inputs :-
//			(IplImage*) binImg : Pointer to the 1-Channel Binary Image
//          (int*) numLine : Will receive the number of lines in the text box
//          (IplImage**) lineImg : An array of IplImages; entries 1..numLine each hold one line of text
// Outputs :-
//			(unsigned char) errCode : The Error Code of Execution
// Invoked As : errCode = computeLineSegmentation( binImg, &numLine, lineImg );
unsigned char computeLineSegmentation( IplImage* binImg , int* numLine, IplImage** lineImg)
{
	
	// Check Inputs
	if( (binImg == NULL) || (binImg->imageSize <= 0) || (binImg->nChannels != 1) )
	{
		// Error Code 1: Invalid Input
		return(1);
	}

	// Line Segmentation
	int th1 = 0;// may depend on font
	int baseIndx = -(binImg->widthStep);
	int count=0,flag=0, start=0,end=0;
	for(int y=0 ; y< (binImg->height) ; ++y)
	{
		// update base index
		baseIndx = baseIndx + (binImg->widthStep) ;	
		int currIndx = baseIndx - 1;
	    flag=0;
		for(int x =0 ; x < (binImg->width) ; ++x )
		{
			currIndx = currIndx + 1;
			if( binImg->imageData[currIndx] < 0 )	// foreground pixel: imageData is signed char, so 255 reads as -1
			  {
				  flag=1;
				  break;
			  }			
		}
		
		if( flag==0 || (y == binImg->height - 1) )	// blank row, or the last row
		{
			if(flag==1)
				end = binImg->height -1;
			else
				end = y-1;
			if( end - start > th1)
			{									
				count++;	// note: line images are stored at indices 1..numLine
			// Allocate Memory for the Line Image
				lineImg[count] = cvCreateImage( cvSize( binImg->width , end-start +  1  ) , IPL_DEPTH_8U , 1 );
				int bindex = (start-1)*(binImg->widthStep);
				int bindex1 = -binImg->widthStep;
				for(int l=start;l<=end;l++)
				{
					bindex = bindex + binImg->widthStep;
					bindex1 = bindex1 + binImg->widthStep;
					int cindex = bindex - 1;
					int cindex1 = bindex1 - 1; 
					for(int m=0;m< binImg->width;m++)
					{
						cindex++;
						cindex1++;
						lineImg[count]->imageData[cindex1] = binImg->imageData[cindex]; 
						
					}
				
				}
			}
			start = y + 1;	// the next candidate segment begins after this blank row
		}
		
	}
		
	*numLine = count;
    // Error Code 0 : All well
    return( 0 );	
}
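The two routines compose into a box -> lines -> words pass; a sketch under the
same assumptions as above (binary input, assumed array capacities, results
stored from index 1), where boxBin is a hypothetical 1-channel binary text box:

	IplImage* lineImg[32] = { 0 };
	IplImage* wordImg[64] = { 0 };
	int numLine = 0, numWord = 0;
	if( computeLineSegmentation( boxBin, &numLine, lineImg ) == 0 )
	{
		for( int l = 1 ; l <= numLine ; ++l )	// results start at index 1
		{
			if( computeWordSegmentation( lineImg[l], &numWord, wordImg ) == 0 )
				printf( "line %d : %d words\n", l, numWord );
		}
	}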
Code Example #4
File: iqi.cpp  Project: moravianlibrary/differ
CvScalar calcQualityIndex :: compare(IplImage *source1, IplImage *source2, Colorspace space)
{
    IplImage *src1,* src2;
    src1 = colorspaceConversion(source1, space);
    src2 = colorspaceConversion(source2, space);

    int x = src1->width, y = src1->height;
    int nChan = src1->nChannels, d = IPL_DEPTH_32F;
    CvSize size = cvSize(x, y);

    //creating FLOAT type images of src1 and src2
    IplImage *img1 = cvCreateImage(size, d, nChan);
    IplImage *img2 = cvCreateImage(size, d, nChan);

    //Image squares
    IplImage *img1_sq = cvCreateImage(size, d, nChan);
    IplImage *img2_sq = cvCreateImage(size, d, nChan);
    IplImage *img1_img2 = cvCreateImage(size, d, nChan);

    cvConvert(src1, img1);
    cvConvert(src2, img2);

    //Squaring the images thus created
    cvPow(img1, img1_sq, 2);
    cvPow(img2, img2_sq, 2);
    cvMul(img1, img2, img1_img2, 1);

    IplImage *mu1 = cvCreateImage(size, d, nChan);
    IplImage *mu2 = cvCreateImage(size, d, nChan);
    IplImage *mu1_sq = cvCreateImage(size, d, nChan);
    IplImage *mu2_sq = cvCreateImage(size, d, nChan);
    IplImage *mu1_mu2 = cvCreateImage(size, d, nChan);

    IplImage *sigma1_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma2_sq = cvCreateImage(size, d, nChan);
    IplImage *sigma12 = cvCreateImage(size, d, nChan);

    //PRELIMINARY COMPUTING

    //average smoothing is performed
    cvSmooth(img1, mu1, CV_BLUR, B, B);
    cvSmooth(img2, mu2, CV_BLUR, B, B);

    //getting mu, mu_sq, mu1_mu2
    cvPow(mu1, mu1_sq, 2);
    cvPow(mu2, mu2_sq, 2);
    cvMul(mu1, mu2, mu1_mu2, 1);

    //calculating sigma1, sigma2, sigma12
    cvSmooth(img1_sq, sigma1_sq, CV_BLUR, B, B);
    cvSub(sigma1_sq, mu1_sq, sigma1_sq);

    cvSmooth(img2_sq, sigma2_sq, CV_BLUR, B, B);
    cvSub(sigma2_sq, mu2_sq, sigma2_sq);

    cvSmooth(img1_img2, sigma12, CV_BLUR, B, B);
    cvSub(sigma12, mu1_mu2, sigma12);

    //Releasing unused images
    cvReleaseImage(&img1);
    cvReleaseImage(&img2);
    cvReleaseImage(&img1_sq);
    cvReleaseImage(&img2_sq);
    cvReleaseImage(&img1_img2);

    // creating buffers for numerator and denominator
    IplImage *numerator1 = cvCreateImage(size, d, nChan);
    IplImage *numerator = cvCreateImage(size, d, nChan);
    IplImage *denominator1 = cvCreateImage(size, d, nChan);
    IplImage *denominator2 = cvCreateImage(size, d, nChan);
    IplImage *denominator = cvCreateImage(size, d, nChan);

    // FORMULA to calculate Image Quality Index

    // (4*sigma12)
    cvScale(sigma12, numerator1, 4);

    // (4*sigma12).*(mu1*mu2)
    cvMul(numerator1, mu1_mu2, numerator, 1);

    // (mu1_sq + mu2_sq)
    cvAdd(mu1_sq, mu2_sq, denominator1);

    // (sigma1_sq + sigma2_sq)
    cvAdd(sigma1_sq, sigma2_sq, denominator2);

    //Release images
    cvReleaseImage(&mu1);
    cvReleaseImage(&mu2);
    cvReleaseImage(&mu1_sq);
    cvReleaseImage(&mu2_sq);
    cvReleaseImage(&mu1_mu2);
    cvReleaseImage(&sigma1_sq);
    cvReleaseImage(&sigma2_sq);
    cvReleaseImage(&sigma12);
    cvReleaseImage(&numerator1);

    // ((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    cvMul(denominator1, denominator2, denominator, 1);

    //image_quality map
    image_quality_map = cvCreateImage(size, d, nChan);
    float *num, *den, *res;
    num = (float*)(numerator->imageData);
    den = (float*)(denominator->imageData);
    res = (float*)(image_quality_map->imageData);

    // dividing by hand
    // ((4*sigma12).*(mu1_mu2))./((mu1_sq + mu2_sq).*(sigma1_sq + sigma2_sq))
    for (int i=0; i<(x*y*nChan); i++) {
        if (den[i] == 0)
        {
            num[i] = (float)(1.0);
            den[i] = (float)(1.0);
        }
        res[i] = num[i] / den[i];
    }

    // cvDiv is not used here because zero denominators must map to Q = 1, as above
    //cvDiv(numerator, denominator, image_quality_map, 1);

    // the scalar quality index is the average of the quality map
    image_quality_value = cvAvg(image_quality_map);

    //Release images
    cvReleaseImage(&numerator);
    cvReleaseImage(&denominator);
    cvReleaseImage(&denominator1);
    cvReleaseImage(&denominator2);
    cvReleaseImage(&src1);
    cvReleaseImage(&src2);

    return image_quality_value;
}
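For reference, the map assembled by the hand-rolled division above is the
universal image quality index over box-filtered local statistics; per pixel,

	Q = \frac{4\,\sigma_{12}\,\mu_1\,\mu_2}{(\mu_1^2 + \mu_2^2)(\sigma_1^2 + \sigma_2^2)}

and the returned scalar is the cvAvg of this map, with any pixel whose
denominator vanishes forced to Q = 1.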
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
                                CvMat* projPoints1, CvMat* projPoints2,
                                CvMat* points4D);

CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
                             CvMat* new_points1, CvMat* new_points2);


/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
   alpha=0 - only valid pixels will be retained in the undistorted image
   alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
                                         const CvMat* dist_coeffs,
                                         CvSize image_size, double alpha,
                                         CvMat* new_camera_matrix,
                                         CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
                                         CvRect* valid_pixel_ROI CV_DEFAULT(0),
                                         int center_principal_point CV_DEFAULT(0));

/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
                         CvMat* jacobian CV_DEFAULT(0) );

/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
                             const CvMat* dst_points,
                             CvMat* homography,
                             int method CV_DEFAULT(0),
                             double ransacReprojThreshold CV_DEFAULT(3),
                             CvMat* mask CV_DEFAULT(0));
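A minimal sketch of calling cvFindHomography as declared above; the four point
pairs are made-up placeholders, and with exactly four correspondences the
default least-squares method (method = 0) is enough:

	float src[8] = { 0,0,  1,0,  1,1,  0,1 };		// object-plane corners
	float dst[8] = { 10,10,  90,12,  88,95,  8,90 };	// assumed image projections
	CvMat srcPts = cvMat( 4, 2, CV_32FC1, src );
	CvMat dstPts = cvMat( 4, 2, CV_32FC1, dst );
	float h[9];
	CvMat H = cvMat( 3, 3, CV_32FC1, h );
	if( cvFindHomography( &srcPts, &dstPts, &H, 0, 3, 0 ) )
		printf( "H00 = %f\n", h[0] );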
Code Example #6
File: main.cpp  Project: Barbakas/windage
int main()
{
	// connect cameras
	camera1 = new CPGRCamera();
	camera2 = new CPGRCamera();
	camera1->open();
	camera2->open();
	camera1->start();
	camera2->start();

	input1 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	temp1 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	gray1 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	input2 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	temp2 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	gray2 = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);

	cvNamedWindow("image1");
	cvNamedWindow("image2");

	// initialize tracker
	IplImage* referenceImage = cvLoadImage("reference.png", 0);
	tracker1 = new windage::ModifiedSURFTracker();
	((windage::ModifiedSURFTracker*)tracker1)->Initialize(778.195, 779.430, 324.659, 235.685, -0.333103, 0.173760, 0.000653, 0.001114, 45);
	((windage::ModifiedSURFTracker*)tracker1)->RegistReferenceImage(referenceImage, 267.0, 200.0, 4.0, 8);
	((windage::ModifiedSURFTracker*)tracker1)->InitializeOpticalFlow(WIDTH, HEIGHT, 10, cvSize(15, 15), 3);
	((windage::ModifiedSURFTracker*)tracker1)->SetOpticalFlowRunning(true);
	tracker1->GetCameraParameter()->InitUndistortionMap(WIDTH, HEIGHT);

	tracker2 = new windage::ModifiedSURFTracker();
	((windage::ModifiedSURFTracker*)tracker2)->Initialize(778.195, 779.430, 324.659, 235.685, -0.333103, 0.173760, 0.000653, 0.001114, 45);
	((windage::ModifiedSURFTracker*)tracker2)->RegistReferenceImage(referenceImage, 267.0, 200.0, 4.0, 8);
	((windage::ModifiedSURFTracker*)tracker2)->InitializeOpticalFlow(WIDTH, HEIGHT, 10, cvSize(15, 15), 3);
	((windage::ModifiedSURFTracker*)tracker2)->SetOpticalFlowRunning(true);
	tracker2->GetCameraParameter()->InitUndistortionMap(WIDTH, HEIGHT);

	// initialize ar tools
	arTool = new windage::ARForOpenGL();
	((windage::ARForOpenGL*)arTool)->Initialize(WIDTH, HEIGHT, true);
	((windage::ARForOpenGL*)arTool)->AttatchCameraParameter(tracker1->GetCameraParameter());

	// initialize spatial sensors
	cubeGroup = new CubeSensorGroup();
	((CubeSensorGroup*)cubeGroup)->Initialize(1, Vector3(75, 75, 75));

	extendedCubeGroup = new CubeSensorGroup();
	((CubeSensorGroup*)extendedCubeGroup)->Initialize(2, Vector3(0, 0, 0), 3, 10.0, 15.0);

	sensorDetector = new DETECTOR_TYPE();
	((DETECTOR_TYPE*)sensorDetector)->Initialize(ACTIVATION_TRESHOLD);
	((DETECTOR_TYPE*)sensorDetector)->AttatchCameraParameter(0, tracker1->GetCameraParameter());
	((DETECTOR_TYPE*)sensorDetector)->AttatchCameraParameter(1, tracker2->GetCameraParameter());

	extendedSensorDetector = new DETECTOR_TYPE();
	((DETECTOR_TYPE*)extendedSensorDetector)->Initialize(ACTIVATION_TRESHOLD);
	((DETECTOR_TYPE*)extendedSensorDetector)->AttatchCameraParameter(0, tracker1->GetCameraParameter());
	((DETECTOR_TYPE*)extendedSensorDetector)->AttatchCameraParameter(1, tracker2->GetCameraParameter());

	// attatch sensors
	sensorDetector->AttatchSpatialSensors(cubeGroup->GetSensors());
	extendedSensorDetector->AttatchSpatialSensors(extendedCubeGroup->GetSensors());

	// initialize rendering engine
	OpenGLRenderer::init(WIDTH, HEIGHT);
	OpenGLRenderer::setLight();
	glutDisplayFunc(display);
	glutIdleFunc(idle);
	glutKeyboardFunc(keyboard);
	
	glutMainLoop();

	// note: glutMainLoop() never returns, so the cleanup below is not reached in practice
	camera1->stop();
	camera1->close();
	camera2->stop();
	camera2->close();

	return 0;
}
Code Example #7
File: cap_dc1394_v2.cpp  Project: 2693/opencv
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
    dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
    bool code = false, isColor;
    dc1394video_frame_t *dcFrame = 0, *fs = 0;
    int i, nch;

    if (!dcCam || (!started && !startCapture()))
        return false;

    dc1394_capture_dequeue(dcCam, policy, &dcFrame);

    if (!dcFrame)
        return false;

    if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
    {
        goto _exit_;
    }

    isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;

    if (nimages == 2)
    {
        fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));

        //dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
        dc1394_deinterlace_stereo_frames_fixed(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);

        dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
        dcFrame = 0;
        if (!fs->image)
            goto _exit_;
        isColor = colorStereo;
    }
    nch = isColor ? 3 : 1;

    for (i = 0; i < nimages; i++)
    {
        IplImage fhdr;
        dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
        f.size[1] /= nimages;
        f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
        if (isColor)
        {
            if (!frameC)
                frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
            frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
            if (nimages == 1)
            {
                dc1394_convert_frames(&f, frameC);
                dc1394_capture_enqueue(dcCam, dcFrame);
                dcFrame = 0;
            }
            else
            {
                f.color_filter = bayerFilter;
                dc1394_debayer_frames(&f, frameC, bayer);
            }
            fc = frameC;
        }
        if (!img[i])
            img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvSetData(&fhdr, fc->image, fc->size[0]*nch);

    // Swap R&B channels:
    if (nch==3)
        cvConvertImage(&fhdr,&fhdr,CV_CVTIMG_SWAP_RB);

        if( rectify && cameraId == VIDERE && nimages == 2 )
        {
            if( !maps[0][0] || maps[0][0]->width != img[i]->width || maps[0][0]->height != img[i]->height )
            {
                CvSize size = cvGetSize(img[i]);
                cvReleaseImage(&maps[0][0]);
                cvReleaseImage(&maps[0][1]);
                cvReleaseImage(&maps[1][0]);
                cvReleaseImage(&maps[1][1]);
                maps[0][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[0][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                maps[1][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[1][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                char buf[4*4096];
                if( !getVidereCalibrationInfo( buf, (int)sizeof(buf) ) ||
                    !initVidereRectifyMaps( buf, maps[0], maps[1] ))
                    rectify = false;
            }
            cvRemap(&fhdr, img[i], maps[i][0], maps[i][1]);
        }
        else
            cvCopy(&fhdr, img[i]);
    }

    code = true;

_exit_:
    if (dcFrame)
        dc1394_capture_enqueue(dcCam, dcFrame);
    if (fs)
    {
        if (fs->image)
            free(fs->image);
        free(fs);
    }

    return code;
}
Code Example #8
File: MST_RGB.cpp  Project: Gavin654/cv
int main(int argc, char** argv)
{
	FILE *fp;

	IplImage* frame_in = 0; // declare the IplImage pointer
	// open display windows
    cvNamedWindow( "ori", 1 );
	cvNamedWindow( "DIBR", 1 );
	
	////////// load the input data
	frame_in = cvLoadImage("D:\\3-2(d).bmp");

	/////// query the image dimensions
	int height     = frame_in->height;
	int width      = frame_in->width;
	int step  = frame_in->widthStep/sizeof(uchar);
	printf("h = %d w = %d s = %d\n",height,width,step);
	///// declare scratch variables
	int i,j,k,l;
	int avg_reg = 0,diff_bom,diff_right,diff_top,diff_li;
	int reg_A,reg_B,reg_C,reg_D;

	///// allocate the OpenCV image buffers
	IplImage* frame_DIBR = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
    IplImage* frame_gray = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,1);
	IplImage* frame_avg = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
	IplImage* frame_reg = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
	IplImage* frame_out = cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,3);
	
	for( i=0 ; i < height ; i++ ){
		for( j=0 ; j < width ; j++ ){	
			//Y[i][j] = frame_gray->imageData[i*width+j];
			B[i][j] = frame_in->imageData[i*step+j*3+0];
			G[i][j] = frame_in->imageData[i*step+j*3+1];
			R[i][j] = frame_in->imageData[i*step+j*3+2];
			Y[i][j] = R[i][j]*(0.299) + G[i][j]*(0.587) + B[i][j]*(0.114);  
			/*if (i>=0)
			Y[i][j]=10;
			if (i>=12)
			Y[i][j]=100;
			if (i>=50)
			Y[i][j]=200;		*/	

			depth_floor[i][j] = (int)floor((double)((back_TH*i/height)));
			FF[i][j] = 0;
		}//end j		
	}//end i
	///////////////// algorithm (block-based depth assignment)
	for( i=0 ; i < height ; i++ ){
		for( j=0 ; j < width ; j++ ){				
			if ((i%4)==0 && (j%4)==0){
			avg_R[i][j] = R[i][j]+R[i][j+1]+R[i][j+2]+R[i][j+3]+
				R[i+1][j]+R[i+1][j+1]+R[i+1][j+2]+R[i+1][j+3]+
				R[i+2][j]+R[i+2][j+1]+R[i+2][j+2]+R[i+2][j+3]+
				R[i+3][j]+R[i+3][j+1]+R[i+3][j+2]+R[i+3][j+3];	

			avg_G[i][j] = G[i][j]+G[i][j+1]+G[i][j+2]+G[i][j+3]+
				G[i+1][j]+G[i+1][j+1]+G[i+1][j+2]+G[i+1][j+3]+
				G[i+2][j]+G[i+2][j+1]+G[i+2][j+2]+G[i+2][j+3]+
				G[i+3][j]+G[i+3][j+1]+G[i+3][j+2]+G[i+3][j+3];	

			avg_B[i][j] = B[i][j]+B[i][j+1]+B[i][j+2]+B[i][j+3]+
				B[i+1][j]+B[i+1][j+1]+B[i+1][j+2]+B[i+1][j+3]+
				B[i+2][j]+B[i+2][j+1]+B[i+2][j+2]+B[i+2][j+3]+
				B[i+3][j]+B[i+3][j+1]+B[i+3][j+2]+B[i+3][j+3];	
			for( k=0 ; k < 4 ; k++ )
				for( l=0 ; l < 4 ; l++ ){
					avg_R[i+k][j+l] = avg_R[i][j];
					avg_G[i+k][j+l] = avg_G[i][j];
					avg_B[i+k][j+l] = avg_B[i][j];
				}
			}

		}//end j		
	}//end i

	for( i=0 ; i < height ; i=i+4 ){
		for( j=0 ; j < width ; j=j+4 ){	
			if ((i%4)==0 && (j%4)==0){
				//diff_top = abs(avg[i][j]-avg[i-4][j]);
				//diff_li = abs(avg[i][j]-avg[i][j-4]);
				diff_bom = abs(avg_R[i][j]-avg_R[i+4][j])+abs(avg_G[i][j]-avg_G[i+4][j])+abs(avg_B[i][j]-avg_B[i+4][j]);
				diff_right = abs(avg_R[i][j]-avg_R[i][j+4])+abs(avg_G[i][j]-avg_G[i][j+4])+abs(avg_B[i][j]-avg_B[i][j+4]);
			//	printf("[%d][%d] avg = %d avg_right = %d avg_bom = %d bom = %d right = %d\n",i+k,j+l,avg[i][j],avg[i][j+4],avg[i+4][j],diff_bom,diff_right);
			//	printf("FF = %d FF_R = %d FF_B = %d\n",FF[i][j],FF[i][j+4],FF[i+4][j]);
			}
				/*printf("top = %d li = %d\n",diff_top,diff_li);
				_getch();*/
				for( k=0 ; k <= 3 ; k++ )
					for( l=0 ; l <= 3 ; l++ ){
					//	printf("[%d][%d] bom = %d right = %d\n",i+k,j+l,diff_bom,diff_right);
						
					/*if (i == 0 ){/// first row is all zeros
						depth_out[i+k][j+l] = 0;
						FF[i+k][j+l] = 1;
						printf("1\n");

					}else*/ if (diff_bom<=TH_diff && diff_right<=TH_diff && FF[i+4][j] == 0 && FF[i][j+4] == 0 ){	// bottom and right both unassigned: assign values directly
						if (FF[i][j] == 0){
							depth_out[i+k][j+l] = depth_floor[i][j] ;
							if ((k%4)==3 && (l%4)==3){
								FF[i][j] = 255;
							//printf("  0  ");
							//_getch();
							}
						}
						depth_out[i+k+4][j+l] = depth_out[i][j] ;
						depth_out[i+k][j+l+4] = depth_out[i][j] ;

						if ((k%4)==3 && (l%4)==3){
							FF[i+4][j] = 255;						
							FF[i][j+4] = 255;
						//	printf("  2  ");
							//_getch();
						}	
//
					}else if (diff_right<=TH_diff && FF[i][j] == 0 && FF[i][j+4] == 255 ){	// current block unassigned, right block assigned: copy right -> current
						depth_out[i+k][j+l] = depth_out[i][j+4];
						//printf("3");
						//printf("FF = %d FF_R = %d\n",FF[i][j],FF[i][j+4]);
					//	_getch();

					}else if (diff_bom<=TH_diff && FF[i][j] == 0 && FF[i+4][j] == 255 ){	// current block unassigned, bottom block assigned: copy bottom -> current
						depth_out[i+k][j+l] = depth_out[i+4][j];
						//printf("4\n");

					}else if (diff_right<=TH_diff && FF[i][j+4] == 0 ){	// propagate to the right block
						if (FF[i][j+4] == 0)
							depth_out[i+k][j+l] = depth_floor[i][j];

							depth_out[i+k][j+l+4] = depth_out[i][j];
							if ((k%4)==3 && (l%4)==3){
								FF[i][j+4] = 255;					
								//printf("5\n");
							}

					}else if (diff_bom<=TH_diff && FF[i+4][j] == 0 ){		// propagate to the bottom block
						if (FF[i][j] == 0)
							depth_out[i+k][j+l] = depth_floor[i][j];
								
							depth_out[i+k+4][j+l] = depth_out[i][j];
						
						if ((k%4)==3 && (l%4)==3){
							FF[i+4][j] = 255;
							//printf("6\n");
						}
					}else if (depth_out[i+k][j+l] <= 0 && FF[i][j] == 0){
						depth_out[i+k][j+l]=depth_floor[i][j];
					}
				}
				//	printf("[%d][%d] bom = %d right = %d",i+k,j+l,diff_bom,diff_right);
					//_getch();
			
		}//end j		
	}//end i

	for( i=0 ; i < height ; i++ ){
		for( j=0 ; j < width ; j++ ){	
			////lowpass
			reg_A = 0; reg_B = 0;
			for( k=-1 ; k <= 1 ; k++ )
				for( l=-1 ; l <= 1 ; l++ ){
					if( i+k < 0 || i+k >= height || j+l < 0 || j+l >= width )
						continue;	// skip neighbours outside the image
					reg_A += depth_out[i+k][j+l];
					reg_B++;
				}
				reg_C = reg_A/reg_B;
				if (reg_C<=0)
					reg_C = 0;
				else if (reg_C>=255)
					reg_C=255;
			depth_out[i][j] = (unsigned char)reg_C;
		}
	}
	
	/////DIBR
	for( i=0 ; i < height ; i++ ){
		for( j=0 ; j < width ; j++ ){	
			int motion = (int)((10*depth_out[i][j]/(85+depth_out[i][j])));
			frame_out->imageData[i*step+(j)*3+2] = frame_in->imageData[i*step+j*3+2];
			frame_out->imageData[i*step+(j)*3+1] = frame_in->imageData[i*step+j*3+1];
			frame_out->imageData[i*step+(j)*3+0] = frame_in->imageData[i*step+j*3+0];
			if ((j-motion)>0){
				frame_out->imageData[i*step+(j-motion)*3+2] = frame_in->imageData[i*step+j*3+2];
			}
			if ((j+motion)<width){
				frame_out->imageData[i*step+(j+motion)*3+0] = frame_in->imageData[i*step+j*3+0];
				frame_out->imageData[i*step+(j+motion)*3+1] = frame_in->imageData[i*step+j*3+1];
			}			
		}//end j		
	}//end i
	/////////////// output
		for( i=0 ; i < height ; i++ )
			for( j=0 ; j < width ; j++ ){	
				//if (line[i] == 255){
								
				frame_avg->imageData[i*step+j*3+0] = depth_out[i][j];//B
				frame_avg->imageData[i*step+j*3+1] = depth_out[i][j];//G
				frame_avg->imageData[i*step+j*3+2] = depth_out[i][j];//R

				frame_reg->imageData[i*step+j*3+0] = depth_outB[i][j];//B
				frame_reg->imageData[i*step+j*3+1] = depth_outB[i][j];//G
				frame_reg->imageData[i*step+j*3+2] = depth_outB[i][j];//R
			}
			
	//fp=fopen("D:/tt.raw","wb"); //寫圖
	//fwrite(avg,height,width,fp);
	//fclose(fp);

	   cvShowImage( "ori", frame_avg );
	   cvShowImage( "DIBR", frame_out );
	   // save the output image
	   cvSaveImage("D:\\3-4.jpg",frame_out);
	 
	cvWaitKey(0);
        
	cvDestroyWindow( "ori" );//銷毀視窗
	cvDestroyWindow( "DIBR" );//銷毀視窗
	
	return 0;
}
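The DIBR warp above shifts each pixel horizontally by an amount that saturates
with depth: motion(d) = floor( 10*d / (85 + d) ) for d in [0, 255], so the
parallax grows from 0 at d = 0 up to floor(2550/340) = 7 pixels at d = 255.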
Code Example #9
File: PlateFinder.cpp  Project: duyz112/ANPR-Demo
IplImage* PlateFinder::FindPlate (IplImage *src) {
	IplImage* plate = NULL;
	IplImage* contourImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);	// contour image
	IplImage* grayImg =  cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);	// grayscale image
	cvCvtColor(src, grayImg, CV_RGB2GRAY);

	IplImage* cloneImg = cvCloneImage(src);	// cvCloneImage allocates; no separate cvCreateImage needed
	
	// preprocess the image
	cvCopy(grayImg, contourImg);
	cvNormalize(contourImg, contourImg, 0, 255, CV_MINMAX);
	ImageRestoration(contourImg);
	
	IplImage* rectImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
	cvMerge(contourImg, contourImg, contourImg, NULL, rectImg); // merge into a 3-channel image

	// find the contours of the image
	CvMemStorage *storagePlate = cvCreateMemStorage(0);
	CvSeq *contours = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), storagePlate);
	cvFindContours(contourImg, storagePlate, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	//cvShowImage("contourImg", contourImg);
	

	int xmin, ymin, xmax, ymax, w, h, s, r;
	int count;
	double ratio;	// width-to-height ratio
	CvRect rectPlate; 

	// keep the images that might be license plates
	IplImage** plateArr = new IplImage *[5];
	int j = 0;
	for (int i = 0; i < 5; i++)
	{
		plateArr[i] = NULL;
	}

	while (contours) {
		count = contours->total;
		CvPoint *PointArray = new CvPoint[count];
		cvCvtSeqToArray (contours, PointArray, CV_WHOLE_SEQ);

		for (int i = 0; i < count; i++)
		{
			if (i == 0)
			{
				xmin = xmax = PointArray[i].x;
				ymin = ymax = PointArray[i].y;
			}

			if (PointArray[i].x > xmax) {
				xmax = PointArray[i].x;
			}
			if (PointArray[i].x < xmin)  {
				xmin = PointArray[i].x;
			}

			if (PointArray[i].y > ymax) {
				ymax = PointArray[i].y;
			}
			if (PointArray[i].y < ymin)  {
				ymin = PointArray[i].y;
			}
		}

		w = xmax - xmin;
		h = ymax - ymin;
		s = w * h;

		cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), RED);

		// discard rectangles with the wrong proportions
		if (s != 0) {
			r = (contourImg->height * contourImg->width) / s;
		} else {
			r = 1000;
		}

		if (w == 0 && h == 0) {
			ratio = 0;
		} else {
			ratio = (double)w/h;
		}

		if (r > 30 && r < 270) {
			// draw a green rectangle
			cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), GREEN);

			if (ratio > 2.6 && ratio < 7) {
				cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), BLUE);

				if (w > 80 && w < 250 && h > 25 && h < 150) {
					rectPlate = cvRect (xmin, ymin, w, h);

					cvRectangle (cloneImg, cvPoint(rectPlate.x, rectPlate.y),
						cvPoint(rectPlate.x + rectPlate.width, rectPlate.y + rectPlate.height), RED, 3);

					// crop the plate
					plate = cvCreateImage(cvSize(rectPlate.width, rectPlate.height), IPL_DEPTH_8U, 3);
					cvSetImageROI(src, rectPlate);
					cvCopy(src, plate, NULL);
					cvResetImageROI(src);

					// store into the candidate array plateArr
					int cnt = CountCharacter(plate);
					if (cnt >= 5) {
						plateArr[j] = cvCloneImage(plate);
						j++;
					}
					cvReleaseImage(&plate);	// the clone (if any) is kept; drop the crop
				}
			}
		}

		delete []PointArray;

		contours = contours->h_next;
	}

	// sort: move the narrowest candidate to slot 0
	if (plateArr[0]) 
	{
		int w = plateArr[0]->width;

		int flag = 0;	// default to slot 0 when nothing narrower is found
		for (int i = 1; i < 4; i++)
		{
			if (plateArr[i] && plateArr[i]->width < w)
			{
				flag = i;
			}
		}

		plateArr[0] = plateArr[flag];
	}

	cvShowImage("cloneImg", cloneImg);
	//cvShowImage("rectImg", rectImg);
	//cvShowImage("plate", plateArr[0]);

	cvReleaseImage(&contourImg);
	cvReleaseImage(&rectImg);
	cvReleaseImage(&plate);

	return plateArr[0];
}
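To make the geometric filter concrete: in a 640x480 frame, a 160x50 candidate
gives r = (640*480)/(160*50) = 38 and ratio = 160/50 = 3.2, so it survives the
30 < r < 270 and 2.6 < ratio < 7 tests as well as the 80 < w < 250,
25 < h < 150 size gate.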
Code Example #10
File: CameraPublic.cpp  Project: Doufang/PistonRing
int OnNewvision(IplImage* currImageBefore, IplImage* maskImage)
{
	//int pAPosition[6][2]={0};//pointArrayPosition
	//pAPosition[0][0]=0; pAPosition[0][1]=6;
	//pAPosition[1][0]=6; pAPosition[1][1]=0;
	//pAPosition[2][0]=12;pAPosition[2][1]=6;

	//pAPosition[3][0]=0; pAPosition[3][1]=9;
	//pAPosition[4][0]=6; pAPosition[4][1]=12;
	//pAPosition[5][0]=12;pAPosition[5][1]=9;

	//ListPoint pointSet1;
	//pointSet1.Item = (ArrayPoint*)MallocArrayPoint();
	//for (int dIndex = 0; dIndex <3; dIndex++)
	//{//cvPoint(wIndex, hIndex)
	//	pointSet1.Item->push_back(cvPoint(pAPosition[dIndex][0], pAPosition[dIndex][1]));
	//}	
	//
	//cvCircleObj outCircle1;
	//if(pointSet1.Item->size() == 0)
	//	return;
	//FitCircleObj(pointSet1, &outCircle1);

	//ListPoint pointSet2;
	//pointSet2.Item = (ArrayPoint*)MallocArrayPoint();
	//for (int dIndex = 3; dIndex <6; dIndex++)
	//{//cvPoint(wIndex, hIndex)
	//	pointSet2.Item->push_back(cvPoint(pAPosition[dIndex][0], pAPosition[dIndex][1]));
	//}
	//cvCircleObj outCircle2;
	//if(pointSet2.Item->size() == 0)
	//	return;
	//FitCircleObj(pointSet2, &outCircle2);

	//IplImage* currImage1 = cvCreateImage(cvSize(2000, 3000), IPL_DEPTH_8U, 1);
	//memset(currImage1->imageData, 0, currImage1->height*currImage1->widthStep*sizeof(unsigned char));

	//int bwPosition = 0;
	//for (int hIndex = 0; hIndex < currImage1->width; hIndex++)
	//{
	//	int y1 = 0, y2 = 0;
	//	y1 = sqrt( outCircle1.Radius * outCircle1.Radius - hIndex * hIndex);
	//	y2 = sqrt( outCircle2.Radius * outCircle2.Radius - hIndex * hIndex);
	//	for (int wIndex = 0; wIndex < currImage1->height; wIndex++)
	//	{
	//		bwPosition = hIndex*currImage1->widthStep + wIndex;
	//		if( wIndex < y1)
	//			currImage1->imageData[bwPosition] = 0;
	//		else if (wIndex > y1 && wIndex < y2)
	//			currImage1->imageData[bwPosition] = 255;
	//		else
	//			currImage1->imageData[bwPosition] = 0;
	//	}
	//}
	//cvShowImage("currImage1",currImage1);
	//cvWaitKey(0);

	//cvReleaseImage(&currImage1);
	//return;


	int hIndex = 0, wIndex = 0, ImagePosition = 0, TempPosition = 0, colorValue = 0;
	bool leakLight = false;	int LeakLightNum = 5;

	//IplImage* currImageBefore = cvLoadImage("00.bmp", CV_LOAD_IMAGE_GRAYSCALE);

	if (currImageBefore == NULL)
		return 1;

	if (maskImage == NULL)
	{
		return 2;
	}

	IplImage* currImage = cvCreateImage(cvSize(currImageBefore->width, currImageBefore->height), IPL_DEPTH_8U, 1);
	memset(currImage->imageData, 0, currImage->height*currImage->widthStep*sizeof(unsigned char));

	// apply the mask
	cvCopy(currImageBefore, currImage, maskImage);

	//cvShowImage("currImage",currImage);
	//cvWaitKey(0);

	IplImage* EdgeImage = cvCreateImage(cvSize(currImageBefore->width, currImageBefore->height), IPL_DEPTH_8U, 1);
	memset(EdgeImage->imageData, 0, EdgeImage->height*EdgeImage->widthStep*sizeof(unsigned char));

	cvCanny(currImage, EdgeImage, 50, 180, 3);

	//cvShowImage("EdgeImage", EdgeImage);
	//cvWaitKey(0);


	int edgeTempPosition = 0;
	for (int hIndex = 1; hIndex < EdgeImage->height - 1; hIndex++)
	{
		for (int wIndex = 1; wIndex < EdgeImage->width - 1; wIndex++)
		{
			edgeTempPosition = hIndex*EdgeImage->widthStep + wIndex;
			if (EdgeImage->imageData[edgeTempPosition] == 255)
			if (maskImage->imageData[edgeTempPosition + 1] == 0
				|| maskImage->imageData[edgeTempPosition - 1] == 0
				|| maskImage->imageData[edgeTempPosition + maskImage->widthStep] == 0
				|| maskImage->imageData[edgeTempPosition - maskImage->widthStep] == 0)
				EdgeImage->imageData[edgeTempPosition] = 0;
		}
	}

	//cvShowImage("EdgeImage2", EdgeImage);
	////cvSaveImage("E:\\wuxi\\EdgeImage.jpg",EdgeImage);
	//cvWaitKey(0);

	ListPoint pointSet; ListPoint bestPoint; ListPoint tempPoint;
	pointSet.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint.Item = (ArrayPoint*)MallocArrayPoint();
	tempPoint.Item = (ArrayPoint*)MallocArrayPoint();


	ListPoint pointSet13, pointSet23, pointSet33;
	ListPoint bestPoint13, bestPoint23, bestPoint33;
	pointSet13.Item = (ArrayPoint*)MallocArrayPoint();
	pointSet23.Item = (ArrayPoint*)MallocArrayPoint();
	pointSet33.Item = (ArrayPoint*)MallocArrayPoint();

	bestPoint13.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint23.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint33.Item = (ArrayPoint*)MallocArrayPoint();

	IplImage* markImage = cvCreateImage(cvGetSize(currImage), IPL_DEPTH_8U, 1);
	memset(markImage->imageData, 0, markImage->height*markImage->widthStep*sizeof(unsigned char));

	//ArrayPoint* PointArray = (ArrayPoint*)MallocArrayPoint();

	ListRect rectList; ListInt intAreaList;
	rectList.Item = (ArrayRect*)MallocArrayRect();
	intAreaList.Item = (ArrayInt *)MallocArrayInt();

	ExtractAllEdgePointNumForItem(EdgeImage, markImage, cvRect(0, 0, currImageBefore->width, currImageBefore->height), 255, &pointSet);

	// no edge points found: edge detection may be poor, or no part is present
	if (pointSet.Item->size() == 0 || pointSet.Item->size() < 10)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 3;
	}

	CvPoint PartTempPoint;

	for (int dIndex = 0; dIndex < pointSet.Item->size() / 3; dIndex++)
	{
		PartTempPoint = (*pointSet.Item)[dIndex];

		AddArrayPoint(pointSet13.Item, PartTempPoint);
	}

	cvCircleObj TempCircle;

	memset(markImage->imageData, 0, markImage->height*markImage->widthStep*sizeof(unsigned char));

	// RANSAC filtering previously ran over all points; changed to successive 1/3 slices to cut runtime
	//RansacCirclePoint(pointSet, &bestPoint, &tempPoint);
	//if(bestPoint.Item->size() == 0)
	//{
	//	cvReleaseImage(&currImageBefore);
	//	cvReleaseImage(&maskImage);
	//	cvReleaseImage(&markImage);
	//	cvReleaseImage(&currImage);
	//	cvReleaseImage(&EdgeImage);
	//	return;
	//}
	//SortPointsListByXValue(&bestPoint);

	RansacCirclePoint(pointSet13, &bestPoint13, &tempPoint);

	if (bestPoint13.Item->size() == 0)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 4;
	}

	cvCircleObj outCircle;
	cvCircleObj outCircle13, outCircle23, outCircle33;
	//ListPoint bestPoint13, bestPoint23, bestPoint33;
	//bestPoint13.Item = (ArrayPoint*)MallocArrayPoint();	
	//bestPoint23.Item = (ArrayPoint*)MallocArrayPoint();	
	//bestPoint33.Item = (ArrayPoint*)MallocArrayPoint();	

	//for (int dIndex = 0; dIndex< bestPoint.Item->size()/3; dIndex++)
	//{
	//	AddArrayPoint(bestPoint13.Item, (*bestPoint.Item)[dIndex]);
	//}
	FitCircleObj(bestPoint13, &outCircle13);

	if (outCircle13.CirclePoint.x < 0 || outCircle13.CirclePoint.y >0)
	{
		for (int dIndex = pointSet.Item->size() / 3; dIndex < 2 * pointSet.Item->size() / 3; dIndex++)
		{
			PartTempPoint = (*pointSet.Item)[dIndex];

			AddArrayPoint(pointSet23.Item, PartTempPoint);
		}

		RansacCirclePoint(pointSet23, &bestPoint23, &tempPoint);

		if (bestPoint23.Item->size() == 0)
		{
			cvReleaseImage(&currImageBefore);
			//cvReleaseImage(&maskImage);
			cvReleaseImage(&markImage);
			cvReleaseImage(&currImage);
			cvReleaseImage(&EdgeImage);
			return 5;
		}

		FitCircleObj(bestPoint23, &outCircle23);

		if (outCircle23.CirclePoint.x < 0 || outCircle23.CirclePoint.y >0)
		{
			for (int dIndex = 2 * pointSet.Item->size() / 3; dIndex < pointSet.Item->size(); dIndex++)
			{
				PartTempPoint = (*pointSet.Item)[dIndex];

				AddArrayPoint(pointSet33.Item, PartTempPoint);
			}

			RansacCirclePoint(pointSet33, &bestPoint33, &tempPoint);

			if (bestPoint33.Item->size() == 0)
			{
				cvReleaseImage(&currImageBefore);
				//cvReleaseImage(&maskImage);
				cvReleaseImage(&markImage);
				cvReleaseImage(&currImage);
				cvReleaseImage(&EdgeImage);
				return 6;
			}

			FitCircleObj(bestPoint33, &outCircle33);

			if (outCircle33.CirclePoint.x < 0 || outCircle33.CirclePoint.y >0)
			{
				outCircle.CirclePoint.x = 0;
				outCircle.CirclePoint.y = 1;
				outCircle.Radius = 1;
			}
			else
				outCircle = outCircle33;
		}
		else
			outCircle = outCircle23;

	}
	else
		outCircle = outCircle13;

	//FitCircleObj(bestPoint, &outCircle);

	if (outCircle.CirclePoint.y == 1 && outCircle.Radius == 1)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 7;
	}

	ListPoint pointOutCircleSet;
	pointOutCircleSet.Item = (ArrayPoint*)MallocArrayPoint();

	int radiusAdd = 0;
	int radiusMove = 35;

	for (int dIndex = 0; dIndex < VL_MAX(bestPoint.Item->size(), 0); dIndex++)
	{
		CvPoint TempPoint;
		TempPoint.x = ((*bestPoint.Item)[dIndex]).x;
		TempPoint.y = ((*bestPoint.Item)[dIndex]).y + radiusMove;
		AddArrayPoint(pointOutCircleSet.Item, TempPoint);

	}


	ListPoint pointMoreCircleSet;
	pointMoreCircleSet.Item = (ArrayPoint*)MallocArrayPoint();

	for (int wIndex = 0; wIndex < currImageBefore->width; wIndex++)
	{
		CvPoint TempPoint;
		CvPoint TempOutPoint;
		float x = 0, y = 0;
		x = wIndex - outCircle.CirclePoint.x;
		//y = dIndex - outCircle.CirclePoint.y;

		y = sqrt((outCircle.Radius + radiusMove) * (outCircle.Radius + radiusMove) - x * x);

		TempPoint.x = wIndex;
		if (outCircle.CirclePoint.y < 0)
			y = VL_MAX(0, outCircle.CirclePoint.y + y);
		else
			y = VL_MAX(0, outCircle.CirclePoint.y - y);

		TempPoint.y = y;
		if (TempPoint.x >= 0 && TempPoint.y >= 0)
			AddArrayPoint(pointMoreCircleSet.Item, TempPoint);
	}

	SortPointsListByXValue(&pointMoreCircleSet);



	int maskCircleTemp = 0;

	for (int wIndex = 0; wIndex < markImage->width; wIndex++)
	{
		for (int hIndex = 0; hIndex < markImage->height; hIndex++)
		{
			maskCircleTemp = hIndex*markImage->widthStep + wIndex;

			if (hIndex <= ((*pointMoreCircleSet.Item)[wIndex]).y)
			{
				markImage->imageData[maskCircleTemp] = 255;
			}

		}
	}

	int a[4] = { 0 };

	//currImageBefore = cvLoadImage("E:\\wuxi\\leak\\1532.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	getMaxDistance(currImageBefore, &pointMoreCircleSet, a);

	int subPotion = 0;
	for (int hIndex = 0; hIndex < markImage->height; hIndex++)
	{
		for (int wIndex = 0; wIndex < markImage->width; wIndex++)
		{
			subPotion = hIndex*markImage->widthStep + wIndex;

			//currImageBefore->imageData[subPotion] -= maskCircle->imageData[subPotion];
			//if (maskCircle->imageData[subPotion] == 255)
			//	currImageBefore->imageData[subPotion] = 0;
			if (currImageBefore->imageData[subPotion] - markImage->imageData[subPotion] <= 0)
				currImageBefore->imageData[subPotion] = 0;

			if (currImageBefore->imageData[subPotion] - markImage->imageData[subPotion] >= 255)
				currImageBefore->imageData[subPotion] = 255;

		}
	}

	cvThreshold(currImageBefore, markImage, 110, 255, CV_THRESH_BINARY);

	//cvShowImage("currbf", currImageBefore);
	//cvWaitKey(0);


	CvPoint LineCenter;
	CvPoint LineCenter2;
	LineCenter2.x = (int)outCircle.CirclePoint.x;
	LineCenter2.y = (int)outCircle.CirclePoint.y;

	//LineCenter.x = (a[1]+a[0]/2);
	LineCenter.x = a[1] + a[0] / 2;
	LineCenter.y = a[3];


	CvSize imgSize = cvSize(currImageBefore->width, currImageBefore->height);

	CvRect Zone;
	Zone.x = 0;
	Zone.y = 0;
	Zone.width = 0;
	Zone.height = 0;

	int checkLightValue = 55;
	bool Zone0 = false, Zone1 = false, Zone2 = false, Zone3 = false, Zone4 = false;
	// no gap region
	if (a[0] == 0)
	{
		if (getMinYPositon(&pointMoreCircleSet, 0, currImage->width, &Zone, a, imgSize, 0))
		{
			if (getMinYPositon(&pointMoreCircleSet, 0, currImageBefore->width, &Zone, a, imgSize, 0))
				Zone0 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
			//IplImage* ZoneImg = cvCreateImage(cvSize(Zone.width, Zone.height), IPL_DEPTH_8U, 1);
			//memset(ZoneImg->imageData, 0, ZoneImg->height*ZoneImg->widthStep*sizeof(unsigned char));

			//IplImage* ZoneImg2 = cvCreateImage(cvSize(Zone.width, Zone.height), IPL_DEPTH_8U, 1);
			//memset(ZoneImg2->imageData, 0, ZoneImg2->height*ZoneImg2->widthStep*sizeof(unsigned char));

			//cvSetImageROI(currImageBefore,Zone);

			//cvCopy(currImageBefore,ZoneImg);  

			//cvResetImageROI(currImageBefore); 

			//cvThreshold(ZoneImg, ZoneImg2, 110, 255, CV_THRESH_BINARY);	

			//int leakNum = 0;
			//int bwPosition = 0;
			//for (int hIndex = 0; hIndex < ZoneImg2->height; hIndex++)
			//{
			//	for (int wIndex = 0; wIndex < ZoneImg2->width; wIndex++)
			//	{
			//		bwPosition = hIndex*ZoneImg2->widthStep + wIndex;
			//		if( ZoneImg2->imageData[bwPosition] == 255)
			//			leakNum++;					
			//	}
			//}

			//if (leakNum > LeakLightNum)
			//{
			//	leakLight = true;
			//	//cvShowImage("ZoneImage2", ZoneImg2);
			//	//cvWaitKey(0);
			//}

			//cvReleaseImage(&ZoneImg);
		}
		//getMinYPositon(ListPoint* line, int firstPosition, int lastPosition, CvRect* zone, int* a, int direction)

	}
	else	// there is a gap region
	{
		//getMinYPositon
		if (a[1] > currImageBefore->width / 2)
		{
			if (getMinYPositon(&pointMoreCircleSet, a[2], currImageBefore->width, &Zone, a, imgSize, 4))
				Zone4 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);

			if (getMinYPositon(&pointMoreCircleSet, 0, a[1], &Zone, a, imgSize, 3))
				Zone3 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
		}

		if (a[1] <= currImageBefore->width / 2)
		{
			if (getMinYPositon(&pointMoreCircleSet, 0, a[1], &Zone, a, imgSize, 1))
				Zone1 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);

			if (getMinYPositon(&pointMoreCircleSet, a[2], currImageBefore->width, &Zone, a, imgSize, 2))
				Zone2 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
		}
	}

	cvReleaseImage(&currImageBefore);
	//cvReleaseImage(&maskImage);
	cvReleaseImage(&markImage);
	cvReleaseImage(&currImage);
	cvReleaseImage(&EdgeImage);

	bool lastResult = false;
	if (Zone0 || Zone1 || Zone2 || Zone3 || Zone4)
	{
		lastResult = true;
		return -1;
	}

	lastResult = false;
	return 0;

}
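For callers, the return codes of OnNewvision read off the code above as:
1 = currImageBefore is NULL; 2 = maskImage is NULL; 3 = too few edge points
(fewer than 10); 4/5/6 = RANSAC found no consensus on the first/second/third
slice of the edge points; 7 = no valid circle fit; -1 = a light leak was
detected in at least one zone; 0 = no leak.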
Code Example #11
// Function to detect (and optionally draw) any faces present in an image
bool face_detect( IplImage* srcImg, std::vector<CvRect> *vectFaces, CvHaarClassifierCascade* cascade, bool detect_only)
{

    // Create memory for calculations
    static CvMemStorage* storage = 0;

    // Create two points to represent the face locations
    CvPoint pt1, pt2;
    int i;

    if( !cascade )
    {
        MY_LOG("%s: faceCascade is NULL", __FILE__);
        return false;
    }

    // Allocate the memory storage once; it is static, so it can be reused across calls
    if( !storage )
        storage = cvCreateMemStorage(0);

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );

    int max_width = 0;
    // Find whether the cascade is loaded, to find the biggest face. If yes, then:
    if( cascade )
    {

        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence

        DURATION_START;
        CvSeq* faces = cvHaarDetectObjects( srcImg, cascade, storage,
                1.1, 2, CV_HAAR_DO_CANNY_PRUNING
                , cvSize(20, 20) 
                );
        DURATION_STOP("cvHaarDetectObjects()");

        // Loop the number of faces found.
        for( i = 0, max_width=0; i < faces->total; i++ )
        {
            // Create a new rectangle for the face
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            if(vectFaces != NULL) {
                vectFaces -> push_back(*r);
            }

            MY_LOG("%s: found face <%d,%d> with %dx%d\n", __func__, r->x, r->y, r->width, r->height);

            if(!detect_only) {
                // Draw the rectangle for this face in the input image
                cvRectangle( srcImg, cvPoint(r->x, r->y),
                             cvPoint(r->x + r->width, r->y + r->height),
                             CV_RGB(255,0,0), 3, 8, 0 );
            }

            if(max_width < r->width) {

                pt1.x = r->x;
                pt2.x = (r->x + r->width);
                pt1.y = r->y;
                pt2.y = (r->y + r->height);

                max_width = r->width;

            }
        }

        if(max_width <= 4) {
            return false;
        }

        //printf("%s: (%d,%d), (%d,%d) -> (%d * %d)\n", __func__, pt1.x, pt1.y, pt2.x, pt2.y, (pt2.x - pt1.x) , (pt2.y - pt1.y));

        //cvSetImageROI(srcImg, cvRect(pt1.x, pt1.y, pt2.x - pt1.x, pt2.y - pt1.y));
        //// __android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        //// printf("%s: srcImg ROI: (%d * %d)\n",__func__, cvGetImageROI(srcImg).width, cvGetImageROI(srcImg).height );
        //IplImage *tmpImg2 = cvCreateImage( cvSize(cvGetImageROI(srcImg).width, cvGetImageROI(srcImg).height), IPL_DEPTH_8U, 1);
        //IplImage *tmpImg = srcImg;
        ////color depth
        //if(srcImg->nChannels != 1)  {
        //    cvCvtColor(srcImg, tmpImg2, CV_BGR2GRAY);
        //    tmpImg = tmpImg2;
        //}

        ////resize
        //*dstImg = cvCreateImage(cvSize(FACE_SIZE, FACE_SIZE), IPL_DEPTH_8U, 1);
        //cvResize(tmpImg, *dstImg, CV_INTER_CUBIC);

        ////__android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        //cvResetImageROI(srcImg);

        //cvReleaseImage(&tmpImg2);
        ////__android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        
        return true;
    }

    return false;
}
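A minimal sketch of driving face_detect(); the cascade file name and the image
path are assumptions -- point them at whatever frontal-face XML and test image
you have:

	CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)
		cvLoad( "haarcascade_frontalface_alt.xml", 0, 0, 0 );	// assumed path
	IplImage* img = cvLoadImage( "people.jpg", CV_LOAD_IMAGE_COLOR );	// assumed path
	std::vector<CvRect> faces;
	if( cascade && img && face_detect( img, &faces, cascade, true ) )
		printf( "found %d face(s)\n", (int)faces.size() );
	cvReleaseImage( &img );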
Code Example #12
//////////////////////////////////
// main()
//
int _tmain(int argc, _TCHAR* argv[])
{
//	try_conv();
	if( !initAll() ) 
		exitProgram(-1);

	// Capture and display video frames until a face
	// is detected
	int frame_count = 0;
	while( (char)27!=cvWaitKey(1) )
	{
		//Retrieve next image and 
		// Look for a face in the next video frame
		
		//read into pfd_pVideoFrameCopy
		if (!captureVideoFrame()){
			if (frame_count==0)
				throw exception("Failed before reading anything");
			break; //end of video..
		}
		++frame_count;

		CvSeq* pSeq = 0;
		detectFaces(pfd_pVideoFrameCopy,&pSeq);
		
		//Filter pSeq into pSeqOut based on history etc., and
		//update data structures (history, face threads etc.)
		list<Face> & faces_in_this_frame = FdProcessFaces(pfd_pVideoFrameCopy,pSeq);

		//== draw rectangle for each detected face ==
		if (!faces_in_this_frame.empty()){	//faces detected (??)
			int i = 0;
			for(list<Face>::iterator face_itr = faces_in_this_frame.begin(); face_itr != faces_in_this_frame.end(); ++face_itr)
			{
				CvPoint pt1 = cvPoint(face_itr->x,face_itr->y);
				CvPoint pt2 = cvPoint(face_itr->x + face_itr->width,face_itr->y + face_itr->height);
				if (face_itr->frame_id == frame_count) //detected for this frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],3,8,0);
				else //from a previous frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],1,4,0);
			}
		}else{ //no faces detected
			Sleep(100);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvReleaseImage(&pfd_pVideoFrameCopy);
	
	} //end input while
	cout << "==========================================================" << endl;
	cout << "========== Input finished ================================" << endl;
	cout << "==========================================================" << endl << endl;
	
	cout << "Press a key to continue with history playback" <<endl;
	char cc = fgetc(stdin);


	cout << "==========================================================" << endl;
	cout << "==== Playback history + rectangles +                 =====" << endl;
	cout << "==== create output video(s)						  =====" << endl;
	cout << "==========================================================" << endl << endl;
	list<FDHistoryEntry> & pHistory = FdGetHistorySeq();
	
	//== VIDEO WRITER START =====================
	int isColor = 1;
	int fps     = 12;	// or 25 / 30
	int frameW  = 640; // 744 for firewire cameras
	int frameH  = 480; // 480 for firewire cameras
	CvVideoWriter * playbackVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\playback.avi").c_str(),
								PFD_VIDEO_OUTPUT_FORMAT,
							   fps,cvSize(frameW,frameH),isColor);
	CvVideoWriter *  croppedVidWriter = 0;
	if (!playbackVidWriter) {
		cerr << "can't create vid writer" << endl;
		exitProgram(-1);
	}
	bool wasWrittenToVideo = false;
	//== VIDEO WRITER END =====================

	int index = 0;
	// play recorded sequence----------------------------
	// i.e. just what's in the history
	int playback_counter = 0;

	cout << "start finding consensus rect " << endl;
	//find min max
	bool found =false;
	int min_x = INT_MAX,//pFaceRect->x,
		max_x = 0,//pFaceRect->x+pFaceRect->width,
		min_y = INT_MAX,//pFaceRect->y,
		max_y = 0;//pFaceRect->y+pFaceRect->height;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		CvSeq* pFacesSeq = itr->pFacesSeq;
		assert(pFacesSeq);
		//TODO Might want to convert to Face here
		CvRect * pFaceRect = (CvRect*)cvGetSeqElem(pFacesSeq, 0); //works only on first rec series
		if (pFaceRect){
			found = true;
			if (pFaceRect->x < min_x) min_x = pFaceRect->x;
			if (pFaceRect->x+pFaceRect->width > max_x) max_x = pFaceRect->x + pFaceRect->width;
			
			if (pFaceRect->y < min_y) min_y = pFaceRect->y;
			if (pFaceRect->y+pFaceRect->height > max_y) max_y =  pFaceRect->y+pFaceRect->height;
		}
	}
	//assert(found); //some rect in history..
	CvRect consensus_rect;
	consensus_rect.x = min_x;
	consensus_rect.y = min_y;
	consensus_rect.width  = max_x - min_x;
	consensus_rect.height = max_y - min_y;

	Sleep(3000); //just to make sure that pruneHistory isn't modifying..
	cout << "start playback loop " << endl;
	int k = 0;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		cout << ++k << endl;
		//cvResetImageROI(history_itr->pFrame);  //now reset by FDFaceThread
		pfd_pVideoFrameCopy = cvCreateImage( cvGetSize(itr->pFrame ), 8, 3 ); //TODO query image for its properties
		cvCopy( itr->pFrame , pfd_pVideoFrameCopy, 0 );
		CvSeq* pFacesSeq = itr->pFacesSeq;
#ifndef NO_RECTS_ON_PLAYBACK
		for(int i = 0 ;i < pFacesSeq->total ;i++){				
			Face * pFaceRect = (Face*)cvGetSeqElem(pFacesSeq, i);
			assert(pFaceRect != NULL);
			CvPoint pt1 = cvPoint(pFaceRect->x,pFaceRect->y);
			CvPoint pt2 = cvPoint(pFaceRect->x + pFaceRect->width,pFaceRect->y + pFaceRect->height);
			if (itr->frame_id == pFaceRect->frame_id)
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2,	 colorArr[i%3],3,8,0);
			else
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i%3],1,4,0);
		}
#endif
		if (pFacesSeq->total > 0) 
		{	
			assert(found);
			//write 1st sequence if exists to cropped vid
			if (!croppedVidWriter)
				croppedVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\cropped_playback.avi").c_str(),
									PFD_VIDEO_OUTPUT_FORMAT,
	 						   fps,cvSize(max_x-min_x,max_y-min_y),isColor);
			assert(croppedVidWriter);


			cvResetImageROI(pfd_pVideoFrameCopy);
			cvSetImageROI(pfd_pVideoFrameCopy,consensus_rect);
			//write cropped image to video file
			IplImage *croppedImg = cvCreateImage(cvGetSize(pfd_pVideoFrameCopy),
								   pfd_pVideoFrameCopy->depth,
								   pfd_pVideoFrameCopy->nChannels);	
			assert(croppedImg);
			cvCopy(pfd_pVideoFrameCopy, croppedImg, NULL);
			assert(croppedVidWriter);
			cvWriteFrame(croppedVidWriter,croppedImg);
			cvReleaseImage(&croppedImg);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvResetImageROI(pfd_pVideoFrameCopy); //CROP_PLAYBACK_FACE
		cvWriteFrame(playbackVidWriter,pfd_pVideoFrameCopy);
		if( (char)27==cvWaitKey(1) ) break;//exitProgram(0);
		Sleep(50);	
		++playback_counter;	
	}

	
	cvReleaseVideoWriter(&playbackVidWriter);
	cvReleaseVideoWriter(&croppedVidWriter);
	exitProgram(0);
	//-----------------------------------------------------------
	//-----------------------------------------------------------
	//-----------------------------------------------------------
}
Code Example #13
File: captureOpenCV.c  Project: DonghunP/carSDK
void *ControlThread(void *unused)
{
    int i=0;
    char fileName[30];
    NvMediaTime pt1 ={0}, pt2 = {0};
    NvU64 ptime1, ptime2;

    IplImage* imgOrigin;
    IplImage* imgCanny;

    // cvCreateImage
    imgOrigin = cvCreateImage(cvSize(RESIZE_WIDTH, RESIZE_HEIGHT), IPL_DEPTH_8U, 3);
    imgCanny = cvCreateImage(cvGetSize(imgOrigin), IPL_DEPTH_8U, 1);

    int angle, speed;
    IplImage* imgResult;
    unsigned char status;

    unsigned int gain;

    CarControlInit();
    PositionControlOnOff_Write(UNCONTROL);
    SpeedControlOnOff_Write(1);

    //speed controller gain set
    //P-gain
    gain = SpeedPIDProportional_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDProportional_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDProportional_Write(gain);

    //I-gain
    gain = SpeedPIDIntegral_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDIntegral_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDIntegral_Write(gain);

    //D-gain
    gain = SpeedPIDDifferential_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDDefferential_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDDifferential_Write(gain);
    angle = 1460;
    SteeringServoControl_Write(angle);

    imgResult = cvCreateImage(cvGetSize(imgOrigin), IPL_DEPTH_8U, 1);
    int flag = 1;
    while(1)
    {
        pthread_mutex_lock(&mutex);
        pthread_cond_wait(&cond, &mutex);


        GetTime(&pt1);
        ptime1 = (NvU64)pt1.tv_sec * 1000000000LL + (NvU64)pt1.tv_nsec;



        Frame2Ipl(imgOrigin, imgResult); // save image to IplImage structure & resize image from 720x480 to 320x240
        pthread_mutex_unlock(&mutex);


        cvCanny(imgOrigin, imgCanny, 100, 100, 3);

        sprintf(fileName, "captureImage/imgCanny%d.png", i);
        cvSaveImage(fileName , imgCanny, 0);

        sprintf(fileName, "captureImage/imgyuv%d.png", i);
        cvSaveImage(fileName , imgOrigin, 0);


        //sprintf(fileName, "captureImage/imgOrigin%d.png", i);
        //cvSaveImage(fileName, imgOrigin, 0);


        // TODO : control steering angle based on captured image ---------------

        //speed set
        speed = DesireSpeed_Read();
        printf("DesireSpeed_Read() = %d \n", speed);
        //speed = -10;
        //DesireSpeed_Write(speed);
        if(flag == 1){
            if(greenlight>1000)
            {
                printf("right go\n");
                Winker_Write(LEFT_ON);
                usleep(1000000);
                //Winker_Write(ALL_OFF);
                angle = 1400;
                SteeringServoControl_Write(angle);
                speed = 10;
                DesireSpeed_Write(speed);
                speed = DesireSpeed_Read();
                printf("DesireSpeed_Read() = %d \n", speed);
                sleep(1);
                flag = 0;
            }
            else
            {
                printf("left go\n");
                Winker_Write(RIGHT_ON);
                usleep(10000);
                Winker_Write(ALL_OFF);

                speed = 20;
                DesireSpeed_Write(speed);
                usleep(1300000);
                angle = 1950;
                SteeringServoControl_Write(angle);
                usleep(5000000);
                angle = 1460;
                SteeringServoControl_Write(angle);
                usleep(1000000);
                speed = 0;
                DesireSpeed_Write(speed);
                flag = 0;
            }
        }

        // ---------------------------------------------------------------------

        GetTime(&pt2);
        ptime2 = (NvU64)pt2.tv_sec * 1000000000LL + (NvU64)pt2.tv_nsec;
        printf("--------------------------------operation time=%llu.%09llu[s]\n", (ptime2-ptime1)/1000000000LL, (ptime2-ptime1)%1000000000LL);


        i++;
    }

    return NULL; // not reached; keeps the void* thread signature well-formed
}
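A note on the locking pattern above: pthread_cond_wait() should be paired with a predicate check and exactly one unlock per lock. A minimal sketch of the intended consumer side, assuming the capture thread sets a shared flag and signals `cond` after filling the frame (the flag name is illustrative):

#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static int frame_ready = 0;               /* hypothetical flag set by the capture thread */

static void consume_one_frame(IplImage* imgOrigin)
{
    pthread_mutex_lock(&mutex);
    while (!frame_ready)                  /* guards against spurious wakeups */
        pthread_cond_wait(&cond, &mutex);
    frame_ready = 0;
    Frame2Ipl(imgOrigin);                 /* copy shared data while holding the lock */
    pthread_mutex_unlock(&mutex);         /* unlock exactly once */
    /* run Canny / steering logic on the private copy, lock-free */
}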
Code Example #14
File: calibrator.cpp  Project: goretkin/kwc-ros-pkg
  void process_image()
  {

    //    std::cout << "Checking publish count: " << image_in->publish_count << std::endl;

    //    image_in->lock_atom();

    if (image_in->publish_count > 0) {

      cvSetData(cvimage_in, codec_in->get_raster(), 3*704);
      cvConvertImage(cvimage_in, cvimage_bgr, CV_CVTIMG_SWAP_RB);

      //      image_in->unlock_atom();

      CvSize board_sz = cvSize(12, 12);
      CvPoint2D32f* corners = new CvPoint2D32f[12*12];
      int corner_count = 0;
    
      //This function has a memory leak in the current version of opencv!
      int found = cvFindChessboardCorners(cvimage_bgr, board_sz, corners, &corner_count, 
      					  CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);



      IplImage* gray = cvCreateImage(cvSize(cvimage_bgr->width, cvimage_bgr->height), IPL_DEPTH_8U, 1);
      cvCvtColor(cvimage_bgr, gray, CV_BGR2GRAY);
      cvFindCornerSubPix(gray, corners, corner_count, 
      			 cvSize(5, 5), cvSize(-1, -1),
      			 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f ));
      cvReleaseImage(&gray);


      if (take_pic && corner_count == 144) {
	std::stringstream ss;
	img_cnt++;
	ss << dir_name << "/Image" << img_cnt << ".jpg";
	//	std::ofstream imgfile(ss.str().c_str());
	//	imgfile.write((char*)image_in->jpeg_buffer, image_in->compressed_size);
	//	imgfile.close();

	cvSaveImage(ss.str().c_str(), cvimage_bgr);
	
	ss.str("");
	ss << dir_name << "/Position" << img_cnt << ".txt";

	std::ofstream posfile(ss.str().c_str());
	observe->lock_atom();
	posfile << "P: " << observe->pan_val << std::endl
		<< "T: " << observe->tilt_val << std::endl
		<< "Z: " << observe->lens_zoom_val << std::endl
		<< "F: " << observe->lens_focus_val;
	observe->unlock_atom();

	posfile.close();

	take_pic = false;
      }

      float maxdiff = 0;

      for(int c=0; c<12*12; c++) {
	float diff = sqrt( pow(corners[c].x - last_corners[c].x, 2.0) + 
		     pow(corners[c].y - last_corners[c].y, 2.0));
	last_corners[c].x = corners[c].x;
	last_corners[c].y = corners[c].y;

	if (diff > maxdiff) {
	  maxdiff = diff;
	}
      }

      printf("Max diff: %g\n", maxdiff);


      cvDrawChessboardCorners(cvimage_bgr, board_sz, corners, corner_count, found);

      if (undistort) {
	cvUndistort2(cvimage_bgr, cvimage_undistort, intrinsic_matrix, distortion_coeffs);
      } else {
	cvCopy(cvimage_bgr, cvimage_undistort);
      }

      CvFont font;
      cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);    
      std::stringstream ss;

      observe->lock_atom();
      ss << "P: " << observe->pan_val;
      ss << " T: " << observe->tilt_val;
      ss << " Z: " << observe->lens_zoom_val;
      ss << " F: " << observe->lens_focus_val;
      observe->unlock_atom();
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,30), &font, CV_RGB(255,0,0));

      ss.str("");

      ss << "Found " << corner_count << " corners";
      if (centering) {
	ss << " -- Autocentering";
      }
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,60), &font, CV_RGB(255,0,0));

      image_out->width = 704;
      image_out->height = 480;
      image_out->compression = "raw";
      image_out->colorspace = "rgb24";

      //      codec_out->realloc_raster_if_needed();
      cvSetData(cvimage_out, codec_out->get_raster(), 3*image_out->width);      
      cvConvertImage(cvimage_undistort, cvimage_out, CV_CVTIMG_SWAP_RB);

      codec_out->set_flow_data();

      image_out->publish();


      CvPoint2D32f COM = cvPoint2D32f(0,0);
    
      if (centering && corner_count > 20) {
	//average corners:
	for (int i = 0; i < corner_count; i++) {
	  COM.x += corners[i].x / corner_count;
	  COM.y += corners[i].y / corner_count;
	}
      
	if ( (fabs(COM.x - 354.0) > 10) || (fabs(COM.y - 240.0) > 10) ) {
	  float rel_pan,rel_tilt;

	  rel_pan = (COM.x - 354.0) * .001;
	  rel_tilt = -(COM.y - 240.0) * .001;

	  control->pan_val = rel_pan;      
	  control->pan_rel = true;
	  control->pan_valid = true;

	  control->tilt_val = rel_tilt;
	  control->tilt_rel = true;
	  control->tilt_valid = true;

	  control->publish();
	}

      }

      delete[] corners;
      
    } else {
      //      image_in->unlock_atom();
    }
  }
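The corner-detection flow above (detect on the color image, then refine to sub-pixel accuracy on a grayscale copy) can be isolated into a small helper. A sketch, assuming an 8-bit 3-channel input and a board with nx-by-ny inner corners:

#include <opencv/cv.h>

int find_board_corners(IplImage* bgr, int nx, int ny, CvPoint2D32f* corners)
{
    int corner_count = 0;
    int found = cvFindChessboardCorners(bgr, cvSize(nx, ny), corners, &corner_count,
                                        CV_CALIB_CB_ADAPTIVE_THRESH);
    if (corner_count > 0) {
        /* refinement works on a single-channel image */
        IplImage* gray = cvCreateImage(cvGetSize(bgr), IPL_DEPTH_8U, 1);
        cvCvtColor(bgr, gray, CV_BGR2GRAY);
        cvFindCornerSubPix(gray, corners, corner_count, cvSize(5, 5), cvSize(-1, -1),
                           cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10, 0.01));
        cvReleaseImage(&gray);
    }
    return found ? corner_count : 0;
}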
Code Example #15
//Function that wires all the modules together
//Use this as a template to adapt for your own pipeline
 int RunBlobTrackingAuto2323(CvCapture* pCap, CvBlobTrackerAuto* pTracker, char* fgavi_name , char* btavi_name )
{
	int                     OneFrameProcess = 0;
	int                     key;
	int                     FrameNum = 0;
	CvVideoWriter*          pFGAvi = NULL;
	CvVideoWriter*          pBTAvi = NULL;

	/* Main loop: */
	/* When OneFrameProcess == 0, cvWaitKey(1) waits 1 ms and returns the key code, or -1 on timeout; when it is 1, cvWaitKey(0) blocks until a key is pressed. */
	for (FrameNum = 0; pCap && (key = cvWaitKey(OneFrameProcess ? 0 : 1)) != 27; // pressing Esc ends the whole program
		FrameNum++)
	{   /* Main loop: */ // the program's main loop; when it terminates, the program ends
		IplImage*   pImg = NULL;
		IplImage*   pMask = NULL;

		if (key != -1)
		{
			OneFrameProcess = 1;
			if (key == 'r')OneFrameProcess = 0;
		}

		pImg = cvQueryFrame(pCap); // read the next video frame
		if (pImg == NULL) break;


		/* Process: */
		pTracker->Process(pImg, pMask); // process the frame; this call runs the entire pipeline

		if (fgavi_name) // a file name for saving the foreground was supplied
		if (pTracker->GetFGMask()) // if a foreground mask exists, save the foreground and draw the blobs
		{   /* Debug FG: */
			IplImage*           pFG = pTracker->GetFGMask(); // the foreground mask
			CvSize              S = cvSize(pFG->width, pFG->height);
			static IplImage*    pI = NULL;

			if (pI == NULL)pI = cvCreateImage(S, pFG->depth, 3);
			cvCvtColor(pFG, pI, CV_GRAY2BGR);

			if (fgavi_name) // save the foreground to video
			{   /* Save fg to avi file: */
				if (pFGAvi == NULL)
				{
					pFGAvi = cvCreateVideoWriter(
						fgavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pFGAvi, pI); // write one frame
			}

			//draw an ellipse for each blob
			if (pTracker->GetBlobNum() > 0) // pTracker found blobs
			{   /* Draw detected blobs: */
				int i;
				for (i = pTracker->GetBlobNum(); i > 0; i--)
				{
					CvBlob* pB = pTracker->GetBlob(i - 1); // get the (i-1)-th blob
					CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); // blob center
					//this macro is just a cast; see the next line:
					//#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y)
					CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB))), MAX(1, cvRound(CV_BLOB_RY(pB))));
					//the CV_BLOB_RX/RY macros give the blob's half-width and half-height
					int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));
					cvEllipse(pI, // draw the blob as an ellipse on the image
						p,
						s,
						0, 0, 360,
						CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * c) / 255));
				}   /* Next blob: */;
			}
			cvNamedWindow("FG", 0);
			cvShowImage("FG", pI);
		}   /* Debug FG. */ // end of: save the foreground and draw the blobs


		//on the original image: write each blob's id next to it
		/* Draw debug info: */
		if (pImg) // the original frame
		{   /* Draw all information about test sequence: */
			char        str[1024];
			int         line_type = CV_AA;   // Change it to 8 to see non-antialiased graphics.
			CvFont      font;
			int         i;
			IplImage*   pI = cvCloneImage(pImg);

			cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type);

			for (i = pTracker->GetBlobNum(); i > 0; i--)
			{
				CvSize  TextSize;
				CvBlob* pB = pTracker->GetBlob(i - 1);
				CvPoint p = cvPoint(cvRound(pB->x * 256), cvRound(pB->y * 256));
				CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB) * 256)), MAX(1, cvRound(CV_BLOB_RY(pB) * 256)));
				int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));


				//draw the blob onto the original image
				cvEllipse(pI,
					p,
					s,
					0, 0, 360,
					CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * c) / 255), CV_AA, 8);


				//the code below writes the blob id next to the detected blob
				p.x >>= 8;
				p.y >>= 8;
				s.width >>= 8;
				s.height >>= 8;
				sprintf(str, "%03d", CV_BLOB_ID(pB));
				cvGetTextSize(str, &font, &TextSize, NULL);
				p.y -= s.height;
				cvPutText(pI, str, p, &font, CV_RGB(0, 255, 255));
				{
					const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));

					if (pS)
					{
						char* pStr = MY_STRDUP(pS);
						char* pStrFree = pStr;

						while (pStr && strlen(pStr) > 0)
						{
							char* str_next = strchr(pStr, '\n');

							if (str_next)
							{
								str_next[0] = 0;
								str_next++;
							}

							p.y += TextSize.height + 1;
							cvPutText(pI, pStr, p, &font, CV_RGB(0, 255, 255));
							pStr = str_next;
						}
						free(pStrFree);
					}
				}

			}   /* Next blob. */;

			cvNamedWindow("Tracking", 0);
			cvShowImage("Tracking", pI);

			if (btavi_name && pI) // if this frame exists and a save path was passed in (e.g. btavi_name = "1.avi"), save it
			{   /* Save to avi file: */
				CvSize      S = cvSize(pI->width, pI->height);
				if (pBTAvi == NULL)
				{
					pBTAvi = cvCreateVideoWriter(
						btavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pBTAvi, pI);
			}

			cvReleaseImage(&pI);
		}   /* Draw all information about test sequence. */
	}   /*  Main loop. */

	if (pFGAvi)cvReleaseVideoWriter(&pFGAvi);
	if (pBTAvi)cvReleaseVideoWriter(&pBTAvi);
	return 0;
}   /* RunBlobTrackingAuto */
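Both writers above are created lazily on the first frame, once the frame size is known. That pattern can be factored into a helper; a sketch (the function name is illustrative):

#include <opencv/highgui.h>

static void write_debug_frame(CvVideoWriter** writer, const char* path, IplImage* img)
{
    if (*writer == NULL)                  /* create on first use, when the size is known */
        *writer = cvCreateVideoWriter(path, CV_FOURCC('x','v','i','d'), 25,
                                      cvSize(img->width, img->height), 1);
    if (*writer)
        cvWriteFrame(*writer, img);
}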
Code Example #16
File: PlateFinder.cpp  Project: duyz112/ANPR-Demo
void PlateFinder::ImageRestoration(IplImage *src)
{
	int w = src->width;
	int h = src->height;

	IplImage *mImg = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);		// image used for morphological operations
	IplImage *src_pyrdown = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *tmp = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *thresholed = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);	// binary image after thresholding
	IplImage *mini_thresh = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *dst = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);			// image that highlights the plate region

	cvPyrDown (src, src_pyrdown);

	cvMorphologyEx(src_pyrdown, mImg, tmp, S2, CV_MOP_BLACKHAT);
	cvNormalize(mImg, mImg, 0, 255, CV_MINMAX);


	// Binarize mImg
	cvThreshold(mImg, thresholed, (int)10*cvAvg(mImg).val[0], 255, CV_THRESH_BINARY);
	cvZero(dst);
	cvCopy(thresholed, mini_thresh);

	// Slide an 8x16 rectangle across the entire image
	
	int cnt;
	int nonZero1, nonZero2, nonZero3, nonZero4;
	CvRect rect;

	for (int i = 0; i < mini_thresh->width-32; i+=4)
	{
		for (int j = 0; j  < mini_thresh->height-16; j+=4)
		{
			rect = cvRect(i, j, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero1 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i+16, j, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero2 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i, j+8, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero3 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i+16, j+8, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero4 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			cnt = 0;
			if (nonZero1 > 15) { cnt++; }
			if (nonZero2 > 15) { cnt++; }
			if (nonZero3 > 15) { cnt++; }
			if (nonZero4 > 15) { cnt++; }

			if (cnt > 2)
			{
				rect = cvRect (i, j, 32, 16);
				cvSetImageROI(dst, rect);
				cvSetImageROI(mini_thresh, rect);
				cvCopy(mini_thresh, dst);
				cvResetImageROI(dst);
				cvResetImageROI(mini_thresh);
			}
		}
	}

	IplImage* dst_clone = cvCloneImage(dst);

	cvDilate(dst, dst, NULL, 2);
	cvErode(dst, dst, NULL, 2);
	cvDilate(dst, dst, S1, 9);
	cvErode(dst, dst, S1, 10);
	cvDilate(dst, dst);

	/*cvShowImage("Source" , src);
	cvShowImage("mImg", mImg);	
	cvShowImage("mini_thresh", mini_thresh);	
	cvShowImage("dst_clone", dst_clone);	
	cvShowImage("dst", dst);*/

	cvPyrUp(dst, src);

	cvReleaseImage(&mini_thresh);
	cvReleaseImage(&mImg);
	cvReleaseImage(&tmp);
	cvReleaseImage(&dst);
	cvReleaseImage(&src_pyrdown);
	cvReleaseImage(&thresholed);
	cvReleaseImage(&dst_clone);
}
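The core of the restoration above is the black-hat transform, which keeps dark details (plate characters) that fit inside the structuring element, followed by normalization. A minimal sketch; S2 above is project-specific, so a 16x4 rectangle is assumed here:

#include <opencv/cv.h>

void blackhat_normalize(IplImage* gray, IplImage* out)
{
    IplImage* tmp = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
    IplConvKernel* se = cvCreateStructuringElementEx(16, 4, 8, 2, CV_SHAPE_RECT, NULL);
    cvMorphologyEx(gray, out, tmp, se, CV_MOP_BLACKHAT, 1);  /* dark-on-light details */
    cvNormalize(out, out, 0, 255, CV_MINMAX);                /* stretch to full range */
    cvReleaseStructuringElement(&se);
    cvReleaseImage(&tmp);
}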
Code Example #17
File: Lk.cpp  Project: fivejjs/htld
int trackLK(IplImage *imgI,
            IplImage *imgJ,
            float ptsI[],
            int nPtsI,
            float ptsJ[],
            int nPtsJ,
            int level,
            float *fbOut,
            float *nccOut,
            char *statusOut,
            FastTracking *fastTr,
            FAST_TRACKING_STR *fastTrStr)
#endif
{
    //TODO: watch NaN cases
    //double nan = std::numeric_limits<double>::quiet_NaN();
    //double inf = std::numeric_limits<double>::infinity();

    // tracking
    int I, J, winsize_ncc;
    CvSize pyr_sz;
    int i;

    //if unset, default to 5
    if(level == -1)
    {
        level = 5;
    }

    I = 0;
    J = 1;
    winsize_ncc = 10;

    //NOTE: initImgs() must be used correctly or memleak will follow.
    pyr_sz = cvSize(imgI->width + 8, imgI->height / 3);
    PYR[I] = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);
    PYR[J] = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);

    // Points
    if(nPtsJ != nPtsI)
    {
        printf("Inconsistent input!\n");
        return 0;
    }

#ifndef USE_HTLD
    points[0] = (CvPoint2D32f *) malloc(nPtsI * sizeof(CvPoint2D32f)); // template
    points[1] = (CvPoint2D32f *) malloc(nPtsI * sizeof(CvPoint2D32f)); // target
    points[2] = (CvPoint2D32f *) malloc(nPtsI * sizeof(CvPoint2D32f)); // forward-backward
#else
    points[0] = new cv::Point2f[nPtsI]; // template
    points[1] = new cv::Point2f[nPtsI]; // target
    points[2] = new cv::Point2f[nPtsI]; // forward-backward
#endif

    char *statusBacktrack = (char *) malloc(nPtsI);

    for(i = 0; i < nPtsI; i++)
    {
        points[0][i].x = ptsI[2 * i];
        points[0][i].y = ptsI[2 * i + 1];
        points[1][i].x = ptsJ[2 * i];
        points[1][i].y = ptsJ[2 * i + 1];
        points[2][i].x = ptsI[2 * i];
        points[2][i].y = ptsI[2 * i + 1];
    }

#ifndef USE_HTLD
    //lucas kanade track
    cvCalcOpticalFlowPyrLK(imgI, imgJ, PYR[I], PYR[J], points[0], points[1],
                           nPtsI, cvSize(win_size_lk, win_size_lk), level, statusOut, 0, cvTermCriteria(
                               CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
                           CV_LKFLOW_INITIAL_GUESSES);

    //backtrack
    cvCalcOpticalFlowPyrLK(imgJ, imgI, PYR[J], PYR[I], points[1], points[2],
                           nPtsI, cvSize(win_size_lk, win_size_lk), level, statusBacktrack, 0, cvTermCriteria(
                               CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
                           CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY);
#else
    fastTr->runFBOpticalFlows(fastTrStr,
                              nPtsI,
                              (uchar*)statusOut,
                              (uchar*)statusBacktrack,
                              points[0],
                              points[1],
                              points[2]);
#endif

    for(i = 0; i < nPtsI; i++)
    {
        if(statusOut[i] && statusBacktrack[i])
        {
            statusOut[i] = 1;
        }
        else
        {
            statusOut[i] = 0;
        }
    }

    normCrossCorrelation(imgI,
                         imgJ,
                         points[0],
                         points[1],
                         nPtsI,
                         statusOut,
                         nccOut,
                         winsize_ncc,
                         CV_TM_CCOEFF_NORMED);
    euclideanDistance(points[0], points[2], fbOut, nPtsI);

    for(i = 0; i < nPtsI; i++)
    {
        if(statusOut[i] == 1)
        {
            ptsJ[2 * i]     = points[1][i].x;
            ptsJ[2 * i + 1] = points[1][i].y;
        }
        else //flow for the corresponding feature hasn't been found
        {
            //TODO: should we really write N_A_N here?
            ptsJ[2 * i]     = N_A_N;
            ptsJ[2 * i + 1] = N_A_N;
            fbOut[i]        = N_A_N;
            nccOut[i]       = N_A_N;
        }
    }

#ifndef USE_HTLD
    for(i = 0; i < 3; i++)
    {
        free(points[i]);
        points[i] = 0;
    }
#else
    for(i = 0; i < 3; i++) {
        delete[] points[i];
        points[i] = NULL;
    }
#endif

    free(statusBacktrack);
    return 1;
}
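The status array returned by trackLK() marks points that survived both the forward and the backward pass; callers typically also threshold the forward-backward distances in fbOut. A sketch of that filtering step (the threshold choice, e.g. the median of fbOut, is left to the caller):

int filter_by_fb(const float* fbOut, char* statusOut, int nPts, float maxFB)
{
    int i, kept = 0;
    for (i = 0; i < nPts; i++) {
        if (statusOut[i] && fbOut[i] <= maxFB)
            kept++;                       /* point is consistent in both directions */
        else
            statusOut[i] = 0;             /* drop points with a large FB error */
    }
    return kept;
}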
Code Example #18
File: extCalib.c  Project: rito96/3Asemester
int main( int argc, char** argv ){
  CvCapture* capture = NULL;
  IplImage* src = NULL;
  IplImage* src2 = NULL;
  IplImage* gray = NULL; 
  IplImage* output = NULL; 

  CvMat* cornerPoints;
  CvMat* objectPoints;
  CvMat pointsNumMat;
  CvPoint2D32f* points;
  int pointsNum[1];

  ChessBoard chess;
  int pointsPerScene;
  int detectedPointsNum;
  int allPointsFound;
  int i, j;
  char key;
  int camID;
  char* windowName = "extrinsic calibration";

  capture = cvCreateCameraCapture(0);

  if(!capture) {
    fprintf(stderr, "ERROR: capture is NULL \n");
    return(-1);
  }

  chess.dx = CHESS_ROW_DX;
  chess.dy = CHESS_COL_DY;
  chess.patternSize.width = CHESS_ROW_NUM;
  chess.patternSize.height = CHESS_COL_NUM;

  pointsPerScene 
    = chess.patternSize.width * chess.patternSize.height;


  cornerPoints = cvCreateMat(pointsPerScene, 2, CV_32F);
  objectPoints = cvCreateMat(pointsPerScene, 3, CV_32F);

  pointsNum[0] = pointsPerScene;
  pointsNumMat = cvMat(1, 1, CV_32S, pointsNum);

  points 
    = (CvPoint2D32f*)malloc( sizeof(CvPoint2D32f) * pointsPerScene ) ;

  src = cvQueryFrame(capture);

  if(src == NULL){
    fprintf(stderr, "Could not grab and retrieve frame...\n");
    return(-1);
  }

  src2 = cvCreateImage(cvSize(src->width, src->height), src->depth, 3);
  output = cvCreateImage(cvSize(src->width, src->height), src->depth, 3);
  
  cvCopy( src, src2, NULL ); 

  gray = cvCreateImage(cvSize(src2->width, src2->height), src2->depth, 1);
  
  cvNamedWindow( windowName, CV_WINDOW_AUTOSIZE );

  while( 1 ){
    src = cvQueryFrame(capture);
    if( !src ) {
      break;
    }
    cvCopy( src, src2, NULL ); 

    cvCopy( src2, output, NULL );

    cvCvtColor(src2, gray, CV_BGR2GRAY);
    
    if( cvFindChessboardCorners( gray, chess.patternSize, points, 
        &detectedPointsNum, CV_CALIB_CB_ADAPTIVE_THRESH ) ){
      cvFindCornerSubPix(gray, points, detectedPointsNum, 
        cvSize(5, 5), cvSize(-1, -1), 
        cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.1));
      allPointsFound = 1;
    } else {
      allPointsFound = 0;
    }
    
    cvDrawChessboardCorners( src2, chess.patternSize, points, 
      detectedPointsNum, allPointsFound );

    cvShowImage(windowName, src2);

    key = cvWaitKey( 20 );
    if(key == RETURN && allPointsFound ){
      store2DCoordinates( cornerPoints, points, chess, 0 );
      store3DCoordinates( objectPoints, chess, 0 );
      calibrateCamera("intrinsic_param_ref.txt", 
        "extrinsic_param.txt", 
        cornerPoints, objectPoints );
      cvSaveImage( "board.jpg", output, 0 );
      break;
    } else if(key == ESCAPE) {
      break;
    }
  }

  cvDestroyWindow( windowName );

  cvReleaseCapture(&capture);

  free(points);
  cvReleaseMat(&cornerPoints);
  cvReleaseMat(&objectPoints);
  cvReleaseImage(&gray);
  cvReleaseImage(&src2);

  return(0);
}
Code Example #19
File: cvview.c  Project: bjo3rn/libfreenect
int main( int argc, char** argv ) { 
	
	int res;
	int i;
	
	for (i=0; i<2048; i++) {
		float v = i/2048.0;
		v = powf(v, 3)* 6;
		t_gamma[i] = v*6*256;
	}
	
	printf("Kinect camera test\n");
	
	if (freenect_init(&f_ctx, NULL) < 0) {
		printf("freenect_init() failed\n");
		return 1;
	}
	
	if (freenect_open_device(f_ctx, &f_dev, 0) < 0) {
		printf("Could not open device\n");
		return 1;
	}
	
	cvNamedWindow( "RGB", CV_WINDOW_AUTOSIZE );
	cvMoveWindow( "RGB", 0, 0);
	rgbBack = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	rgbFront = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	
	cvNamedWindow( "Depth", CV_WINDOW_AUTOSIZE );
	cvMoveWindow("Depth", FREENECT_FRAME_W, 0);
	depthBack = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	depthFront = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_rgb_callback(f_dev, rgb_cb);
	freenect_set_rgb_format(f_dev, FREENECT_FORMAT_RGB);
	freenect_set_depth_format(f_dev, FREENECT_FORMAT_11_BIT);
	
	res = pthread_create(&kinect_thread, NULL, kinect_threadFunc, NULL);
	if (res) {
		printf("pthread_create failed\n");
		return 1;
	}
	
	freenect_start_depth(f_dev);
	freenect_start_rgb(f_dev);
	
	while(1) {  
		pthread_mutex_lock(&backbuf_mutex);
		{	
			while (got_frames < 2) {
				pthread_cond_wait(&framesReady_cond, &backbuf_mutex);
			}
			
			cvConvertImage(rgbBack, rgbFront, CV_BGR2GRAY);
			cvConvertImage(depthBack, depthFront, CV_BGR2GRAY);
			
			got_frames = 0;
		}
		pthread_mutex_unlock(&backbuf_mutex);
		
		
		cvShowImage("RGB", rgbFront);
		cvShowImage("Depth", depthFront);
		
		char c = cvWaitKey(10);
		if( c == 27 ) break;
	}
	
	
	cvDestroyWindow( "RGB" );
	cvDestroyWindow( "Depth" );
	return 0;
}
Code Example #20
File: TennisSimple.cpp  Project: JuannyWang/CVTrack
int main(int argc, char* argv[])
{
 
    // Default capture size - 640x480
    CvSize size = cvSize(640,480);
 
    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM( 0 );
    if( !capture )
    {
            fprintf( stderr, "ERROR: capture is NULL \n" );
            getchar();
            return -1;
    }
 
    // Create a window in which the captured images will be presented
    cvNamedWindow( "Camera", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
 
    // HSV thresholds for the tennis ball (hue 60..75 is green-yellow); tune with the keys below
    int hl = 60, hu = 75, sl = 255, su = 256, vl = 170, vu = 256;
    
 
    IplImage *  hsv_frame    = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage*  thresholded   = cvCreateImage(size, IPL_DEPTH_8U, 1);
 
    while( 1 )
    {
        // Get one frame
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
        {
                fprintf( stderr, "ERROR: frame is null...\n" );
                getchar();
                break;
        }
 
        CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
        CvScalar hsv_max = cvScalar(hu, su, vu, 0);

        // Convert the color space to HSV, as it is much easier to filter colors in HSV.
        cvCvtColor(frame, hsv_frame, CV_RGB2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
 
        // Memory for hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // hough detector works better with some smoothing of the image
        cvDilate(thresholded, thresholded, NULL, 3);
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        
        cvErode(thresholded, thresholded, NULL, 3);
        
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 1,
                                        thresholded->height/4, 400, 10, 20, 400);
 
        int maxRadius = 0;
        int x = 0, y = 0;
        bool found = false;
        
        for (int i = 0; i < circles->total; i++)
        {
            float* p = (float*)cvGetSeqElem( circles, i );
            if (p[2] > maxRadius){
                maxRadius = p[2];
                x = p[0];
                y = p[1];
                found = true;
            }
        }

        if (found){
            //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
            cvCircle( frame, cvPoint(cvRound(x),cvRound(y)),
                                    3, CV_RGB(0,255,0), -1, 8, 0 );
            cvCircle( frame, cvPoint(cvRound(x),cvRound(y)),
                                    cvRound(maxRadius), CV_RGB(255,0,0), 3, 8, 0 );
        }
 
        cvShowImage( "Camera", frame ); // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
        cvShowImage( "After Color Filtering", thresholded ); // The stream after color filtering
 
        cvReleaseMemStorage(&storage);
 
        // Do not release the frame!
 
        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
        //remove higher bits using AND operator
        int key = cvWaitKey(10);
                
                

        switch(key){
            case 'q' : hu += 5; break;
            case 'Q' : hu -= 5; break;
                    
            case 'a' : hl -= 5; break;
            case 'A' : hl += 5; break;
                    
            case 'w' : su += 5; break;
            case 'W' : su -= 5; break;
                    
            case 's' : sl -= 5; break;
            case 'S' : sl += 5; break;
                    
            case 'e' : vu += 5; break;
            case 'E' : vu -= 5; break;
                    
            case 'd' : vl -= 5; break;
            case 'D' : vl += 5; break;
        }

        if (key != -1){
            printf("H: %i, S: %i, V: %i\nH: %i, S: %i, V: %i\n\n", hu, su, vu, hl, sl, vl);
        }
    }
 
     // Release the capture device and windows
     cvReleaseCapture( &capture );
     cvDestroyAllWindows();
     return 0;
}
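Picking the six H/S/V thresholds by trial and error is slow; one way to seed them is to convert a sampled ball color to HSV first. A sketch, assuming the frame is BGR (which is what cvQueryFrame delivers):

#include <opencv/cv.h>
#include <stdio.h>

void print_hsv_of(unsigned char b, unsigned char g, unsigned char r)
{
    IplImage* bgr = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
    IplImage* hsv = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
    cvSet(bgr, cvScalar(b, g, r, 0), NULL);   /* one-pixel image with the sampled color */
    cvCvtColor(bgr, hsv, CV_BGR2HSV);
    CvScalar s = cvGet2D(hsv, 0, 0);
    printf("H=%d S=%d V=%d\n", (int)s.val[0], (int)s.val[1], (int)s.val[2]);
    cvReleaseImage(&bgr);
    cvReleaseImage(&hsv);
}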
Code Example #21
File: calibration.cpp  Project: drewm1980/planepower
    void Calibrator::batchCalibrate()
    {
        log(Debug) << "(Calibrator) batchCalibrate entered" << endlog();
		

        int image_width = 0;
        int image_height = 0;
        log(Debug) << "calibration with " << _imageFiles.size() << " images" << endlog();
        for (unsigned int i = 0; i < _imageFiles.size(); ++i) {

            log(Debug) << "load image " << _imageFiles[i].c_str() <<endlog();
            cv::WImageBuffer1_b image( cvLoadImage(_imageFiles[i].c_str(), CV_LOAD_IMAGE_GRAYSCALE) );
            
            image_width = image.Width();
            image_height = image.Height();

            log(Debug) << "working on image name: " << _imageFiles[i].c_str() << endlog();

            int ncorners = 0;
            bool success = _detector->findCorners(image.Ipl(), &_corners[0], &ncorners);
            if (success) {
                log(Debug) << "found corners " << endlog();
                _cal->addView(&_corners[0], _detector->objectPoints(), _corners.size());
            }

            cv::WImageBuffer3_b display(image.Width(), image.Height());
            cvCvtColor(image.Ipl(), display.Ipl(), CV_GRAY2BGR);
            cvDrawChessboardCorners(display.Ipl(), cvSize(boardWidth, boardHeight),&_corners[0], ncorners, success);
            cvShowImage("Calibration", display.Ipl());
            cvWaitKey(0);
        }
        /// Calibrate the model
        _cal->calibrate(image_width, image_height);
        /// Save the intrinsic camera parameters to file
        string fileName(imageDirectory);
        fileName.append("Intrinsics.ini");
        _cal->model().save(fileName.c_str());


        fileName = imageDirectory;
        fileName.append("K_cv.xml");
        cvSave(fileName.c_str(),&_cal->model().K_cv() );
        log(Debug) << "intrisics saved to " << fileName << endlog();

        /// Copy the extrinsic camera parameters out of the object
        //cvCopy( _cal.extrinsics_ , _extrinsics);
        _cal->getExtrinsics(*_rot,*_trans);
        
        /// Save the extrinsic data as a test TODO: put it on a bufferport
        //cvSave("extrinsic.xml", &_extrinsics);

        log(Debug) << "before cvSave " << endlog();
        fileName = imageDirectory;
        fileName.append("rot.xml");
        cvSave(fileName.c_str(),_rot);
        fileName = imageDirectory;
        fileName.append("trans.xml");
        cvSave(fileName.c_str(), _trans);
        log(Debug) << "after cvSave " << endlog();

        log(Debug) << "(Calibrator) batchCalibrate finished" << endlog();
    }
Code Example #22
File: main.cpp  Project: jital94/emo_fc
void face_detect_crop(IplImage * input,IplImage * output)
{

    IplImage * img;
    img = cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,1);
    cvCvtColor(input,img,CV_RGB2GRAY);//convert input to Greyscale and store in image
    int face_origin_x,face_origin_y,width,height;//variables to crop the face (currently unused)


    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); //load the face detection cascade
    storage = cvCreateMemStorage(0);
    int scale = 1;
    CvPoint pt1,pt2;
    int face_number;

    CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces ? faces->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, face_number );

        //Specifies the points for rectangle.
        /* pt1_____________

           |              |

           |              |

           |              |

           |_____________pt2 */
        pt1.x = r->x*scale;
        pt2.x = (r->x+r->width)*scale;
        pt1.y = r->y*scale;
        pt2.y = (r->y+r->height)*scale;
        cvRectangle( input, pt1, pt2, CV_RGB(255,255,255), 1, 8, 0 );
        CvRect rs=*r;
        //cvNamedWindow("i-O", 1);
        //cvShowImage("i-O",input);
        //cvWaitKey(0);
        cvSetImageROI(img,rs);
    }
    IplImage * frame;
    CvSize s1= {48,48};
    frame=cvCreateImage(s1,IPL_DEPTH_8U,1);

    cvResize(img,frame);
    cvCvtColor(frame,output,CV_GRAY2RGB);

    CvPoint pt;
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name_eye, 0, 0, 0 ); //load the eye detection cascade
    CvSeq* faces1 = cvHaarDetectObjects( input, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces1 ? faces1->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces1, face_number );
        pt.x = (r->x*scale);
        pt2.x = ((r->x+r->width)*scale);
        pt.y = (r->y*scale);
        pt2.y = ((r->y+r->height)*scale);
        cvRectangle( input, pt, pt2, CV_RGB(0,255,255), 1, 8, 0 );
    }



}
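face_detect_crop() reloads both cascades and never releases them or the memory storage, which leaks on every call. A sketch of the cleanup that would pair with it, assuming the same global cascade/storage pointers:

#include <opencv/cv.h>

void release_detector_state(CvHaarClassifierCascade** cascade, CvMemStorage** storage)
{
    if (*cascade) cvReleaseHaarClassifierCascade(cascade);  /* frees and NULLs the pointer */
    if (*storage) cvReleaseMemStorage(storage);
}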
Code Example #23
File: cvgabor.cpp  Project: mdqyy/faceTree
/*!
\fn CvGabor::get_image(int Type)
Get the specific type of image of the Gabor kernel

Parameters:
Type		The Type of gabor kernel, e.g. REAL, IMAG, MAG, PHASE   

Returns:
Pointer to image structure, or NULL on failure	

Return an IplImage with a specific Type: "REAL" "IMAG" "MAG" "PHASE"
*/
IplImage* CvGabor::get_image(int Type)
{

	if(IsKernelCreate() == false)
	{ 
		perror("Error: the Gabor kernel has not been created in get_image()!\n");
		return NULL;
	}
	else
	{  
		IplImage* pImage;
		IplImage *newimage;
		newimage = cvCreateImage(cvSize(Width,Width), IPL_DEPTH_8U, 1 );
		//printf("Width is %d.\n",(int)Width);
		//printf("Sigma is %f.\n", Sigma);
		//printf("F is %f.\n", F);
		//printf("Phi is %f.\n", Phi);

		//pImage = gan_image_alloc_gl_d(Width, Width);
		pImage = cvCreateImage( cvSize(Width,Width), IPL_DEPTH_32F, 1 );


		CvMat* kernel = cvCreateMat(Width, Width, CV_32FC1);
		CvMat* re = cvCreateMat(Width, Width, CV_32FC1);
		CvMat* im = cvCreateMat(Width, Width, CV_32FC1);
		double ve, ve1,ve2;
		CvScalar S;
		CvSize size = cvGetSize( kernel );
		int rows = size.height;
		int cols = size.width;
		switch(Type)
		{
		case 1:  //Real

			cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
			//pImage = cvGetImage( (CvMat*)kernel, pImageGL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve = cvGetReal2D((CvMat*)kernel, i, j);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break;
		case 2:  //Imag
			cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
			//pImage = cvGetImage( (CvMat*)kernel, pImageGL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve = cvGetReal2D((CvMat*)kernel, i, j);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break; 
		case 3:  //Magnitude //add by yao

			cvCopy( (CvMat*)Real, (CvMat*)re, NULL );
			cvCopy( (CvMat*)Imag, (CvMat*)im, NULL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve1 = cvGetReal2D((CvMat*)re, i, j);
					ve2 = cvGetReal2D((CvMat*)im, i, j);
					ve = cvSqrt(ve1*ve1+ve2*ve2);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break;
		case 4:  //Phase
			///@todo
			break;
		}

		cvNormalize((IplImage*)pImage, (IplImage*)pImage, 0, 255, CV_MINMAX, NULL );


		cvConvertScaleAbs( (IplImage*)pImage, (IplImage*)newimage, 1, 0 );

		cvReleaseMat(&kernel);

		cvReleaseImage(&pImage);

		return newimage;
	}
}
Code Example #24
File: cv.jit.features.c  Project: fourks/cv.jit
t_jit_err cv_jit_features_matrix_calc(t_cv_jit_features *x, void *inputs, void *outputs)
{
	t_jit_err err=JIT_ERR_NONE;
	long in_savelock=0,out_savelock=0;
	t_jit_matrix_info in_minfo,out_minfo;
	char *out_bp, *in_bp;
	void *in_matrix,*out_matrix;
	int i;
	int roi_w,roi_h,roi_offset;
	float *out_data;
	CvMat source;
	int featureCount = 2048;
	
	//Get pointers to matrices
	in_matrix 	= jit_object_method(inputs,_jit_sym_getindex,0);
	out_matrix  = jit_object_method(outputs,_jit_sym_getindex,0);

	if (x&&in_matrix&&out_matrix) 
	{
		//Lock the matrices
		in_savelock = (long) jit_object_method(in_matrix,_jit_sym_lock,1);
		out_savelock = (long) jit_object_method(out_matrix,_jit_sym_lock,1);
		
		//Make sure input is of proper format
		jit_object_method(in_matrix,_jit_sym_getinfo,&in_minfo);
		jit_object_method(out_matrix,_jit_sym_getinfo,&out_minfo);

		if(in_minfo.dimcount != 2)
		{
			err = JIT_ERR_MISMATCH_DIM;
			goto out;
		}
		if(in_minfo.planecount != 1)
		{
			err = JIT_ERR_MISMATCH_PLANE;
			goto out;
		}
		if(in_minfo.type != _jit_sym_char)
		{
			err = JIT_ERR_MISMATCH_TYPE;
			goto out;
		}
		
		//Don't process if one dimension is < 2
		if((in_minfo.dim[0] < 2)||(in_minfo.dim[1] < 2))
			goto out;
		
		if(x->useroi)
		{
			CLIP_ASSIGN(x->roi[0],0,in_minfo.dim[0]);
			CLIP_ASSIGN(x->roi[1],0,in_minfo.dim[1]);
			CLIP_ASSIGN(x->roi[2],0,in_minfo.dim[0]);
			CLIP_ASSIGN(x->roi[3],0,in_minfo.dim[1]);
			
			long x0 = MIN(x->roi[0], x->roi[2]), y0 = MIN(x->roi[1], x->roi[3]);
			long x1 = MAX(x->roi[0], x->roi[2]), y1 = MAX(x->roi[1], x->roi[3]);
			x->roi[0] = x0; x->roi[1] = y0;	//use temporaries: reassigning roi[0] first would skew the MAX computations
			x->roi[2] = x1; x->roi[3] = y1;
			
			roi_w = x->roi[2] - x->roi[0];
			roi_h = x->roi[3] - x->roi[1];
			
			if(roi_w == 0)
				roi_w = in_minfo.dim[0] - x->roi[0];
			if(roi_h == 0)
				roi_h = in_minfo.dim[1] - x->roi[1];
				
			roi_offset = x->roi[1] * in_minfo.dimstride[1] + x->roi[0];
			
			jit_object_method(in_matrix,_jit_sym_getdata,&in_bp);
			
			//Convert Jitter matrix to OpenCV matrix
			cvInitMatHeader( &source, roi_h, roi_w, CV_8UC1, in_bp + roi_offset, in_minfo.dimstride[1] );
		}
		else
		{
			//Convert Jitter matrix to OpenCV matrix
			cvJitter2CvMat(in_matrix, &source);
		}
		
		//Adjust the size of eigImage and tempImage if need be
		if((source.cols != x->eigImage->cols)||(source.rows != x->eigImage->rows))
		{
			cvReleaseMat(&(x->eigImage));
			x->eigImage = cvCreateMat( source.rows, source.cols, CV_32FC1 );
			cvReleaseMat(&(x->tempImage));
			x->tempImage = cvCreateMat( source.rows, source.cols, CV_32FC1 );
		}
		
		//Adjust parameters
		x->threshold = MAX(0.001,x->threshold);
		x->distance = MAX(1,x->distance);
		
		//Calculate
		cvGoodFeaturesToTrack( &source, x->eigImage, x->tempImage,x->features, &featureCount,x->threshold, x->distance,NULL, x->aperture,0, 0.04 );
		
		if(x->precision == 1){
			int minsize = (x->aperture*2)+5;
			
			//Error check for cvFindCornerSubPix
			if((featureCount>0)&&(source.cols > minsize)&&(source.rows > minsize))
				cvFindCornerSubPix( &source, x->features, featureCount, cvSize(x->aperture,x->aperture),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER, 10, 0.1f));
		}
		
		
		//Prepare output
		out_minfo.dim[0] = featureCount;
		jit_object_method(out_matrix,_jit_sym_setinfo,&out_minfo);
		jit_object_method(out_matrix,_jit_sym_getinfo,&out_minfo);
		jit_object_method(out_matrix,_jit_sym_getdata,&out_bp);
		if (!out_bp) { err=JIT_ERR_INVALID_OUTPUT; goto out;}
		
		out_data = (float *)out_bp;
		
		if(x->useroi)
		{
			for(i=0; i < featureCount; i++)
			{
				out_data[0] = x->features[i].x + x->roi[0];
				out_data[1] = x->features[i].y + x->roi[1];
				
				out_data += 2;
			}
		}
		else
		{
			for(i=0; i < featureCount; i++)
			{
				out_data[0] = x->features[i].x;
				out_data[1] = x->features[i].y;
				
				out_data += 2;
			}
		}
	}

	
out:
	jit_object_method(out_matrix,gensym("lock"),out_savelock);
	jit_object_method(in_matrix,gensym("lock"),in_savelock);
	return err;
}
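Stripped of the Jitter plumbing, the detection step above reduces to one cvGoodFeaturesToTrack() call plus two scratch buffers. A standalone sketch for an 8-bit grayscale input:

#include <opencv/cv.h>

int detect_corners(IplImage* gray, CvPoint2D32f* corners, int maxCorners)
{
    IplImage* eig = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);
    IplImage* tmp = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);
    int count = maxCorners;               /* in: capacity, out: corners found */
    cvGoodFeaturesToTrack(gray, eig, tmp, corners, &count,
                          0.01,           /* quality level */
                          5.0,            /* min distance between corners */
                          NULL, 3, 0, 0.04);
    cvReleaseImage(&eig);
    cvReleaseImage(&tmp);
    return count;
}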
Code Example #25
File: litearry.c  Project: muzilike/roubosys
//Video device and display device initialization & preview (with device status checks)--------------------------------
int video_fb_init_preview()
{
	//serial-port related variables-------------------------------
	char buff[512];
	int nread=0;
	int FrameDone=0;//end-of-frame flag
	int FrameCount=0;//frame length counter
	int j=0;
	int key=0;//on/off flag
	int stat=0;//video device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex;
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");

	video_fd = open("/dev/video0", O_RDWR, 0);//打开摄像头设备,使用阻塞方式打开
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/*************First query the driver for the device's supported video formats*************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0) == 0))
	{
		fmt0.index++;
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",fmt0.index,fmt0.pixelformat&0xff,(fmt0.pixelformat>>8)&0xff,(fmt0.pixelformat>>16)&0xff,(fmt0.pixelformat>>24)&0xff,fmt0.description);
	}
	/**************************END***************************/
	
	//---------------------Set the capture format----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;//source format: JPEG, YUV 4:2:2 or RGB
	fmt.fmt.pix.width = 640;//frame width
	fmt.fmt.pix.height = 480;//frame height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0)//apply the configuration
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++
	//if(SDL_Init(SDL_INIT_VIDEO) < 0)
	//{
	//	printf("SDL Init failed.\n");
	//	exit(1);
	//}
	
	//SDL setup: YUV output
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	//SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	//SDL setup end
	
	//OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3);//image header only, no pixel buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1);//image with its own pixel buffer
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	//OpenCV setup end

	//------------------------Request frame buffers---------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	//number of buffers, i.e. how many frames can be held
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	//memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)//apply the configuration
	{
		perror("request buffer error \n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//--------Map the memory obtained via VIDIOC_REQBUFS into user space-------------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		//memory type: V4L2_MEMORY_MMAP (memory-mapped) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)//query the buffer
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		//use mmap to map the driver buffer into the application's address space------
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0)//enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//-------------------------------------------------------//
	
	//----------------------Start streaming----------------------//
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//---------------------Read back the source format---------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	int i=0;	
	//variables for the fb device, currently unused---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	//set up the framebuffer device------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile;//JPEG file handle
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	//open the framebuffer device------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	//query the framebuffer state-----------------------------------------
	fb_set(fb);//set display memory parameters	
	fb_stat(fb);//get width, height and bit depth from the framebuffer driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	//map the framebuffer address into user space----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
		
	//preview the captured frames (a capture feature could be added here if needed)-------------------
	while (sdl_quit)
	{
		
		fd_set fds;//file descriptor set for select()
		struct timeval tv;
		int ret1;
		
		FD_ZERO(&fds);//clear the fd set
		FD_SET(video_fd,&fds);//add the video device fd to the set
		
		//wait with a timeout (may block)-------------------------------
		tv.tv_sec =2;
		tv.tv_usec=0;
		//wait until the video device is ready--------------------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}		
		while(sdl_quit)		
		{
					 
			//check for quit events
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			//fetch one frame that is ready in the FIFO-----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			//dequeue the ready buffer--------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
	
			//get the user-space address of the current frame, for format conversion------------------
			unsigned char *ptcur=buffers[buf.index].start;
			//++++++++++++++++++++++++++++++++++++++++
			//processing area
			//+++++++++++++++++++++++++++++++++++++++++
			//grayscale conversion
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char));//allocate to avoid a segfault
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/

			//YUV to RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			//OpenCV face detection
			cvSetData(img, pRGB, fmt.fmt.pix.width*3);//attach the pRGB buffer to img
			cvCvtColor(img, imggray, CV_RGB2GRAY);//convert img to grayscale into imggray for detection
			CvHaarClassifierCascade *cascade=(CvHaarClassifierCascade*)cvLoad("/usr/share/opencv-2.4.6.1/data/haarcascades/haarcascade_frontalface_alt2.xml", storage,0,0);
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30),cvSize(30,30));
			
			//OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 );//draw a box on the original image
			}
			

			//adjust the OpenCV img pixel data
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			//OpenCV: display the image	
			cvShowImage("image", img);
			char c = cvWaitKey(1);
			printf("%d\n",c);
			if(c==27)
				sdl_quit=0;
			
			
			//load YUV into SDL
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			//load RGB into SDL
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			//frame-rate statistics
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			//re-enqueue the used buffer--------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	//unmap the framebuffer
	//close(fb);//close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);					
	//SDL_DestroyMutex(affmutex);	//mutex creation above is commented out, so do not destroy it
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImage(&img);
	cvReleaseImage(&imggray);
	free(status);
	free(buffers);
	//free(pRGB);
	SDL_Quit();
	return 0;

}
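The capture loop above calls YUYVToRGB888() without showing it. A plausible implementation follows (an assumption -- the project's own version may clamp or order channels differently); YUYV packs two pixels into four bytes sharing one U/V pair:

static unsigned char clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (unsigned char)v); }

void YUYVToRGB888(const unsigned char *yuyv, unsigned char *rgb, int width, int height)
{
	int i, j, n = width * height / 2;    /* number of 2-pixel groups */
	for (i = 0; i < n; i++) {
		int y0 = yuyv[4*i+0], u = yuyv[4*i+1] - 128;
		int y1 = yuyv[4*i+2], v = yuyv[4*i+3] - 128;
		for (j = 0; j < 2; j++) {
			int y = (j == 0) ? y0 : y1;
			rgb[6*i+3*j+0] = clamp_u8(y + ((359 * v) >> 8));           /* R */
			rgb[6*i+3*j+1] = clamp_u8(y - ((88 * u + 183 * v) >> 8));  /* G */
			rgb[6*i+3*j+2] = clamp_u8(y + ((454 * u) >> 8));           /* B */
		}
	}
}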
Code Example #26
File: track.cpp  Project: sagarjoglekar/ISpy
int track( IplImage* frame, int flag,int Cx,int Cy,int R )
{

    {

        int i, bin_w, c;

        LOGE("#######################Check1############################");

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
            LOGE("######################Check2###########################");
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );


        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );
            LOGE("###########################Check3######################");
            if(flag==0)
            {
            	LOGE("###############Initialized#############################");
				selection.x=Cx-R;
				selection.y=Cy-R;
				selection.height=2*R;
				selection.width=2*R;
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
                LOGE("##############Check4#########################");
            }
            LOGE("##############Check5#########################");
            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;
            char buffer[50];
            sprintf(buffer,"vals= %d %d and %d",track_window.x,track_window.y,track_window.width);
            LOGE(buffer);
            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        LOGE("!!!!!!!!!!!!!!!!!!Done Tracking!!!!!!!!!!!!!!!!!!!!!!!!!!!!");


    }



    return 0;
}
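track() calls hsv2rgb() without defining it; it is presumably the helper from OpenCV's camshiftdemo.c, which maps a hue value to a saturated color for the histogram display:

static CvScalar hsv2rgb(float hue)
{
    int rgb[3], p, sector;
    static const int sector_data[][3] =
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;   /* map hue 0..180 to sectors 0..6 */
    sector = cvFloor(hue);
    p = cvRound(255*(hue - sector));
    p ^= sector & 1 ? 255 : 0;

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

    return cvScalar(rgb[2], rgb[1], rgb[0], 0);
}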
// Function for Computing Character Segmentation
// Inputs :-
//			(IplImage*) binImg : Pointer to the 1-Channel Binary Image
//          (int*) numChar : It will contain the number of characters in word
//          (IplImage**) charImg : An array of IplImages where each image corresponds to a particular character
// Outputs :-
//			(unsigned char) errCode : The Error Code of Execution
// Invoked As : errCode = computeCharacterSegmentation( binImg , &numChar, charImg );
unsigned char computeCharacterSegmentation( IplImage* binImg , int* numChar, IplImage** charImg)
{
	
	// Check Inputs
	if((binImg->imageSize<=0) || (binImg->nChannels!=1))
	{
		// Error Code 1: Invalid Input
		return(1);
	}
	
	// Character Segmentation
	
	//Finding Headline
	
	int widthStep = binImg->widthStep;
	int baseIndx = -(binImg->widthStep);
	int *count= new int[binImg->height];
	int max=0;
	for(int y=0 ; y< (binImg->height) ; ++y)
	{
		// update base index
		baseIndx = baseIndx + (binImg->widthStep) ;
		
		int currIndx = baseIndx - 1;
	    
	    count[y]=0;
		for(int x =0 ; x < (binImg->width) ; ++x )
		{
			currIndx = currIndx +1;
			
			if( binImg->imageData[currIndx]>-1)
			  {
				  count[y]++; // ink pixel (white is stored as -1 in the signed char data)
				  
			  }			
		}
		if(count[y] > count[max])
		  max=y;		// max stores the row number
	}
	printf("headline : %d\n",max);
	
	// Removing Headline
	baseIndx = -binImg->widthStep;
    for(int y=0 ; y< (binImg->height) ; ++y)
	{
		// update base index
	   baseIndx = baseIndx + (binImg->widthStep) ;
		
	   int currIndx = baseIndx - 1;
	   
	   if(count[y]>= ((count[max]*9)/10 ))
	   {
			for(int x =0 ; x < (binImg->width) ; ++x )
			{
				currIndx = currIndx +1;
				binImg->imageData[currIndx]=-1;//whitening	
		    }
	   }		
	}
	// show word without headline
	cvShowImage("out4",binImg);
	cvWaitKey(20);
	getchar();	
	
	// Segmenting individual characters
	int charcount=0;			
	int th1 =2;
	baseIndx = -1;
	int flag1=0,flag2=0, start1=0,start2=0,end1=0,end2=0;
	for(int x=0 ; x< (binImg->width) ; ++x)
	{
		// update base index
		baseIndx = baseIndx + 1 ;		
		int currIndx = baseIndx - widthStep;
		flag1=0;
		flag2=0;
		for(int y1 = 0; y1 < max ; y1++)
		{
			currIndx = currIndx + widthStep;
			if( binImg->imageData[currIndx] > -1)
			{
				flag1 =1;
				break;
			}
		}
		for(int y =max ; y < (binImg->height) ; ++y )
		{
			currIndx = currIndx +widthStep;
			
			if( binImg->imageData[currIndx]> -1)
			  {
				  flag2=1;
				  break;
			  }			
		}
	    if(flag2==0 or (end2 ==0 and x == binImg->width - 1) )
		{
			if(flag2 ==1)
				end2 = binImg->width -1;
			else 
				end2 = x -1;
			if( end2 - start2 > th1)
			{	
								
				charcount++;
			// Allocate Memory for the Character Image
				charImg[charcount] = cvCreateImage( cvSize( end2-start2 + 2 , binImg->height - max ) , IPL_DEPTH_8U , 1 );
				int bindex = -binImg->widthStep;
				int nwidthStep = end2 - start2 + 1 + 4 - (end2-start2 + 1)%4;
				
				int bindex1 = -nwidthStep;
				for(int l=max;l< binImg->height;l++)
				{
					bindex = bindex + binImg->widthStep;
					bindex1 = bindex1 + nwidthStep;
					int cindex = bindex + start2 - 1;
					int cindex1 = bindex1 - 1; 
					for(int m=0;m<nwidthStep;m++)
					{
						cindex = cindex + 1;
						cindex1 = cindex1 + 1;
						charImg[charcount]->imageData[cindex1] = binImg->imageData[cindex]; 
						
					}
				
				}				
				start2 = x + 1;
		     }
		     else 
			{
				start2 = x + 1;			
			}	
		}
		if(flag1==0 or (end1 == 0 and x == binImg->width -1))
		{
			if(flag1==1)
			 end1 = binImg->width -1;
			else
				end1 = x -1;
			if( end1 - start1 > th1)
			{					
				charcount++;
			// Allocate Memory for the Character Image
				charImg[charcount] = cvCreateImage( cvSize( end1-start1 + 2 , max) , IPL_DEPTH_8U , 1 );
				int bindex = -binImg->widthStep;
				int nwidthStep = end1 - start1 + 1 + 4 - (end1-start1 + 1)%4;
				
				int bindex1 = -nwidthStep;
				for(int l=0;l< max;l++)
				{
					bindex = bindex + binImg->widthStep;
					bindex1 = bindex1 + nwidthStep;
					int cindex = bindex + start1 - 1;
					int cindex1 = bindex1 - 1; 
					for(int m=0;m<nwidthStep;m++)
					{
						cindex = cindex + 1;
						cindex1 = cindex1 + 1;
						charImg[charcount]->imageData[cindex1] = binImg->imageData[cindex]; 
						
					}
				
				}				
				start1 = x + 1;
		     }
		     else 
			{
				start1 = x + 1;			
			}	
		}
	
	}
			
	*numChar = charcount; 
    // Error Code 0 : All well
    return( 0 );	
}
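
A short caller sketch for the character segmentation routine above. Its opening lines are not part of this excerpt, so the function name computeCharSegmentation and the parameter order below are assumptions modelled on the "Invoked As" convention used by the other routines in this collection; what the code above does guarantee is that charImg is filled starting at index 1, that *numChar reports the count, and that the caller owns the returned images.

	// hypothetical caller -- the function name and the array bound are assumptions
	IplImage* charImg[64];
	int numChar = 0;
	unsigned char errCode = computeCharSegmentation( binImg , &numChar , charImg );
	if( errCode == 0 )
	{
		for( int i = 1 ; i <= numChar ; ++i )	// filling starts at index 1, not 0
		{
			cvShowImage( "character" , charImg[i] );
			cvWaitKey( 0 );
			cvReleaseImage( &charImg[i] );	// the caller owns the segment images
		}
	}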
Code example #28
File: Camera.cpp    Project: ACAVJW4H/BlobTracking
    /*************************************************************************
    Process
        Process the frames in a video one by one.
            1) FG detection
            2) Blob Detection
            3) Blob Tracking and Association
            4) Blob Post Processing
            5) Blob Analysis
            6) Store the results
    Exceptions
        None
    *************************************************************************/
    void Camera::Process(const int startFrameIndex, const int endFrameIndex)
    {
        ASSERT_TRUE ( m_initializied );
        ASSERT_TRUE ( m_pTracker != NULL );

        InitializeDisplayWindows( );

        LOG_CONSOLE( "Start processing " + m_videoFileName );

        int key, oneFrameProcess=0, frameNum; 
        for ( frameNum = 1; 
             m_videoCap.grab() &&
            ( key = cvWaitKey( oneFrameProcess ? 0 : 1 ) ) != 27 &&     // ESC quits
            ( frameNum <=  endFrameIndex || endFrameIndex < 0 );        // negative endFrameIndex: no upper limit
            frameNum++ )
        {
            if ( frameNum >= startFrameIndex )
            {
                std::cout << "frameNum:  " << frameNum << '\r';

                // get the video frame
                m_videoCap.retrieve( m_originalFrameMat );

                // downscale the image if required
                if ( m_downScaleImage )
                {
                    cv::resize( m_originalFrameMat, m_frame,  m_frame.size() );
                }
                else
                {
                    m_frame = m_originalFrameMat;
                }

                m_frameIpl = m_frame;   // IplImage view of the frame for the C-API tracker

                if ( key != -1 )
                {
                    // 'r' resumes free running; any other key switches to frame-by-frame
                    oneFrameProcess = ( key == 'r' ) ? 0 : 1;
                }

                // Process the current frame
                m_pTracker->Process( &m_frameIpl, m_pFGMaskIpl);
                m_fgMask        = m_pTracker->GetFGMask();


                // keep an IplImage view of the FG mask for the C-API blob functions
                IplImage fgMaskIpl = m_fgMask;


                // Save Blob Information in a file
                for( int i = m_pTracker->GetBlobNum(); i> 0; i-- )
                {
                    CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                    ASSERT_TRUE( pBlob != NULL );

                    // Save blob record
                    SaveBlobRecord( pBlob, frameNum );
                }

                if ( m_displayIntermediateResult || m_saveIntermediateResult )
                {
                    char tempString[128];
                    std::string textMessage;
                    //display intermediate result if necessary
                    CvFont    font; 
                    CvSize  TextSize;
                    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, CV_AA );

                    sprintf(tempString,"frame # %d", frameNum);
                    textMessage = tempString;
                    cv::putText( m_originalFrameMat, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255) );   // yellow
                    cv::putText( m_fgMask, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255) );
                    cv::putText( m_frame, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255) );

                    // draw each blob as an ellipse, with its blob id printed next to it
                    int c = 0;  // colour component: 0 -> green, 255 -> red
                    for ( int i = m_pTracker->GetBlobNum(); i > 0; i-- )
                    {
                        CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                        ASSERT_TRUE( pBlob != NULL );

                        cv::Point blobCorner( cvRound( pBlob->x * 256 ), cvRound( pBlob->y * 256 ) );

                        CvSize  blobSize = cvSize( MAX( 1, cvRound( CV_BLOB_RX(pBlob) * 256 ) ), 
                                                   MAX( 1, cvRound( CV_BLOB_RY(pBlob) * 256 ) ) );

                        cv::Scalar boundingBoxColor( c, 255-c, 0 );

                        if ( m_pTracker->GetState( CV_BLOB_ID( pBlob ) ) != 0 )
                        {
                            boundingBoxColor = cv::Scalar( 255-c, c, 0 );
                        }

                        cv::ellipse( m_frame, 
                                    cv::RotatedRect( cv::Point2f( pBlob->x, pBlob->y ), cv::Size2f( pBlob->w, pBlob->h ), 0 ),
                                    boundingBoxColor );   // colour reflects the blob's tracking state
                        blobCorner.x >>= 8;      
                        blobCorner.y >>= 8;
                        
                        blobSize.width >>= 8;
                        blobSize.height >>= 8;
                        blobCorner.y -= blobSize.height;

                        sprintf( tempString, "BlobId=%03d", CV_BLOB_ID(pBlob) );
                        cvGetTextSize( tempString, &font, &TextSize, NULL );
                        
                        cv::putText( m_frame,
                                     std::string( tempString ),
                                     blobCorner,
                                     CV_FONT_HERSHEY_PLAIN,
                                     1,
                                     cv::Scalar( 255, 255, 0, 0 ) );
                    }
                }

                if ( m_displayIntermediateResult )
                {
                    cv::imshow(m_videoFileName+"_FGMask", m_fgMask);
                    cv::imshow(m_videoFileName+"_Tracking", m_frame);
                }

                if ( m_saveIntermediateResult )
                {
                    cv::Mat tmpFrame;
                    cv::cvtColor( m_fgMask, tmpFrame, CV_GRAY2BGR );
                    *m_pFGAvi << tmpFrame;             
                    *m_pBTAvi << m_frame;
                }
            }
        }   // end of the frame loop
    }
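
A minimal driver sketch for Camera::Process. The Camera construction and configuration calls are not shown in this listing, so the setup below is an assumption; the Process arguments follow the loop condition above, where a negative endFrameIndex means "run until the video ends".

    int main()
    {
        Camera camera;              // assumed: the real initialization (video file, tracker) is not shown
        camera.Process( 1, -1 );    // start at frame 1, no upper frame limit
        return 0;
    }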
// Function for Computing Feature Histogram of Projection based on pixel value
// Inputs :-
//			(IplImage*) binImg : Pointer to the 1-Channel Binary Image
//          (float**) vertProj : Receives the normalized number of black pixels in each column
//          (float**) horizProj : Receives the normalized number of black pixels in each row
// Outputs :-
//			(unsigned char) errCode : The Error Code of Execution
// Invoked As : errCode = computeHistProjPixel( binImg , &vertProj , &horizProj );
unsigned char computeHistProjPixel( IplImage* binImg , float** vertProj, float** horizProj)
{
	// Check Inputs
	if((binImg->imageSize<=0) || (binImg->nChannels!=1) )
	{
		// Error Code 1: Invalid Input
		return(1);
	}
	
	//Computing Edge of Character Image using Canny Edge Detector
		//open the parameter file
	FILE* fp;
	fp = fopen( "CannyParameter.txt" , "r" );
	if( fp == NULL )
	{
		// Error Code 2 : Canny Parameter File Could Not Be Opened
		return(2);
	}
	
	// Reading the Canny Parameter File
	char paramNameString[100] , eqString[3] , headerString[100];
	fscanf( fp , "%s" , headerString );
	int cannyTh1 = 0;
	fscanf( fp , "%s%s%d" , paramNameString , eqString , &cannyTh1 );
	int cannyTh2 = 0;
	fscanf( fp , "%s%s%d" , paramNameString , eqString , &cannyTh2 );
	int cannyWinSize = 0;
	fscanf( fp , "%s%s%d" , paramNameString , eqString , &cannyWinSize );
	IplImage* edgeImg = NULL;
	edgeImg = cvCreateImage( cvSize( binImg->width , binImg->height)  , IPL_DEPTH_8U , 1 );	
	cvCanny( binImg , edgeImg , cannyTh1 , cannyTh2 , cannyWinSize ); 
	
	fclose(fp);
	
	// Horizontal Profile
		
	int baseIndx = -(binImg->widthStep);
	float *hcount = new float[binImg->height]; 
	float max=0;
	for(int y=0 ; y< (binImg->height) ; ++y)
	{
		// update base index
		baseIndx = baseIndx + (binImg->widthStep) ;
		
		int currIndx = baseIndx - 1;
	    hcount[y]=0;
		for(int x =0 ; x < (binImg->width) ; ++x )
		{
			currIndx = currIndx +1;
			
			// black (text) pixels are 0; white is 255, i.e. -1 as a signed char,
			// so ">-1" selects exactly the black pixels
			if( binImg->imageData[currIndx]>-1)
			  {
				  hcount[y] = hcount[y] + 1;
			  }			
		}
		if(max < hcount[y])
		  max = hcount[y];
	}
	
	for(int i=0; i< binImg->height ; i++)
	{
		hcount[i] = (max > 0) ? (hcount[i] / max) : 0;	// guard against an all-white image
	}
		
	// Vertical Projection
	
	max=0;
	float *vcount = new float[binImg->width];
	baseIndx = -1;
	for(int x=0 ; x< (binImg->width) ; ++x)
	{
		// update base index
		baseIndx = baseIndx + 1 ;
		
		int currIndx = baseIndx - binImg->widthStep;
		vcount[x]=0;
		for(int y =0 ; y < (binImg->height) ; ++y )
		{
			currIndx = currIndx + binImg->widthStep;
			
			// count the black pixels, as in the horizontal profile
			if( binImg->imageData[currIndx]>-1)
			  {				  
				  vcount[x] = vcount[x] + 1;				  
			  }			
		}
		if(max < vcount[x] )
		  max = vcount[x];
	    
	    
	}	
	
	for(int i=0; i< binImg->width ; i++)
	{
		vcount[i] = (max > 0) ? (vcount[i] / max) : 0;	// same guard as above
	}
		
	*vertProj = vcount;
	*horizProj = hcount;
	
	cvReleaseImage( &edgeImg );	// the edge image is not returned, so free it here
	
    // Error Code 0 : All well
    return( 0 );
	
}
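
A caller sketch for computeHistProjPixel. The input file name is a placeholder; the routine expects an 8-bit single-channel binary image, and the caller must free the profile arrays, which are allocated with new[] inside the routine.

	IplImage* binImg = cvLoadImage( "char_0001.png" , CV_LOAD_IMAGE_GRAYSCALE );	// placeholder file name
	float* vertProj = NULL;
	float* horizProj = NULL;
	unsigned char errCode = computeHistProjPixel( binImg , &vertProj , &horizProj );
	if( errCode == 0 )
	{
		for( int x = 0 ; x < binImg->width ; ++x )
			printf( "column %d : %f\n" , x , vertProj[x] );
		delete [] vertProj;	// allocated with new[] inside the routine
		delete [] horizProj;
	}
	cvReleaseImage( &binImg );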
Code example #30
File: main.cpp    Project: DaikiMaekawa/openni_sandbox
int main (int argc, char * argv[])
{
    IplImage* camera = 0;

    try {
        // initialize the context
        xn::Context context;
        XnStatus rc = context.InitFromXmlFile(CONFIG_XML_PATH);
        if (rc != XN_STATUS_OK) {
            throw std::runtime_error(xnGetStatusString(rc));
        }

        // create the image generator
        xn::ImageGenerator image;
        rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
        if (rc != XN_STATUS_OK) {
            throw std::runtime_error(xnGetStatusString(rc));
        }

        // create the depth generator
        xn::DepthGenerator depth;
        rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
        if (rc != XN_STATUS_OK) {
            throw std::runtime_error(xnGetStatusString(rc));
        }

        // align the depth viewpoint with the image
        depth.GetAlternativeViewPointCap().SetViewPoint(image);

        // create the user generator
        xn::UserGenerator user;
        rc = context.FindExistingNode( XN_NODE_TYPE_USER, user );
        if ( rc != XN_STATUS_OK ) {
            rc = user.Create(context);
            if ( rc != XN_STATUS_OK ) {
                throw std::runtime_error( xnGetStatusString( rc ) );
            }
        }

        // make sure user detection (the skeleton capability) is supported
        if (!user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
            throw std::runtime_error("user detection is not supported");
        }

        XnCallbackHandle userCallbacks, calibrationCallbacks, poseCallbacks;
        XnChar pose[20] = "";

        // check whether calibration requires a pose
        xn::SkeletonCapability skelton = user.GetSkeletonCap();
        if (skelton.NeedPoseForCalibration()) {
            // make sure pose detection is supported
            if (!user.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
                throw std::runtime_error("pose detection is not supported");
            }

            // get the calibration pose
            skelton.GetCalibrationPose(pose);

            // register the pose detection callbacks; the capability gets its own
            // name so that it does not shadow the XnChar pose[20] buffer above
            xn::PoseDetectionCapability poseCap = user.GetPoseDetectionCap();
            poseCap.RegisterToPoseCallbacks(&::PoseDetected, &::PoseLost,
                &user, poseCallbacks);
        }

        // register the user detection callbacks
        user.RegisterUserCallbacks(&::UserDetected, &::UserLost, pose,
            userCallbacks);

        // register the calibration callbacks
        skelton.RegisterCalibrationCallbacks(&::CalibrationStart, &::CalibrationEnd,
            &user, calibrationCallbacks);

        // track the full skeleton profile
        skelton.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

        // start generating data on all nodes
        context.StartGeneratingAll();

        // create an image of the camera size (8-bit RGB)
        XnMapOutputMode outputMode;
        image.GetMapOutputMode(outputMode);
        camera = ::cvCreateImage(cvSize(outputMode.nXRes, outputMode.nYRes),
            IPL_DEPTH_8U, 3);
        if (!camera) {
            throw std::runtime_error("error : cvCreateImage");
        }

        // display toggles
        bool isShowImage = true;
        bool isShowUser = true;
        bool isShowSkelton = true;

        // main loop
        while (1) {
            // wait for all nodes to update
            context.WaitAndUpdateAll();

            // get the image data
            xn::ImageMetaData imageMD;
            image.GetMetaData(imageMD);

            // get the user (scene) data
            xn::SceneMetaData sceneMD;
            user.GetUserPixels(0, sceneMD);

            // build the display image from the camera frame
            char* dest = camera->imageData;
            const xn::RGB24Map& rgb = imageMD.RGB24Map();
            for (int y = 0; y < imageMD.YRes(); ++y) {
                for (int x = 0; x < imageMD.XRes(); ++x) {
                    // user label (0 = no user)
                    XnLabel label = sceneMD(x, y);
                    if (!isShowUser) {
                        label = 0;
                    }

                    // camera pixel
                    XnRGB24Pixel pixel = rgb(x, y);
                    if (!isShowImage) {
                        pixel = xnRGB24Pixel( 255, 255, 255 );
                    }

                    // write into the output buffer
                    dest[0] = pixel.nRed   * Colors[label][0];
                    dest[1] = pixel.nGreen * Colors[label][1];
                    dest[2] = pixel.nBlue  * Colors[label][2];
                    dest += 3;
                }
            }

            // draw the skeletons
            if (isShowSkelton) {
                XnUserID aUsers[15];
                XnUInt16 nUsers = 15;
                user.GetUsers(aUsers, nUsers);
                for (int i = 0; i < nUsers; ++i) {
                    if (skelton.IsTracking(aUsers[i])) {
                        SkeltonDrawer skeltonDrawer(camera, skelton,
                            depth, aUsers[i]);
                        skeltonDrawer.draw();
                    }
                }
            }

            // the buffer was filled in RGB order; swap channels for HighGUI display
            ::cvCvtColor(camera, camera, CV_BGR2RGB);
            ::cvShowImage("KinectImage", camera);

            // key events
            char key = cvWaitKey(10);
            // quit
            if (key == 'q') {
                break;
            }
            // toggle the display options
            else if (key == 'i') {
                isShowImage = !isShowImage;
            }
            else if (key == 'u') {
                isShowUser = !isShowUser;
            }
            else if (key == 's') {
                isShowSkelton = !isShowSkelton;
            }
        }
    }
    catch (std::exception& ex) {
        std::cout << ex.what() << std::endl;
    }

    ::cvReleaseImage(&camera);

    return 0;
}
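
The callbacks registered above (::UserDetected, ::UserLost, ::PoseDetected, ::PoseLost, ::CalibrationStart, ::CalibrationEnd) are not part of this listing. Below is a minimal sketch of the usual OpenNI 1.x flow they implement (new user, then pose detection, then calibration, then tracking); the bodies are assumptions, only the signatures are fixed by the registration calls:

void XN_CALLBACK_TYPE UserDetected(xn::UserGenerator& generator, XnUserID nId, void* pCookie)
{
    // the cookie is the calibration pose name passed to RegisterUserCallbacks
    generator.GetPoseDetectionCap().StartPoseDetection((const XnChar*)pCookie, nId);
}

void XN_CALLBACK_TYPE UserLost(xn::UserGenerator& generator, XnUserID nId, void* pCookie)
{
    std::cout << "user lost: " << nId << std::endl;
}

void XN_CALLBACK_TYPE PoseDetected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie)
{
    // pose found: stop pose detection and request calibration
    xn::UserGenerator* pUser = (xn::UserGenerator*)pCookie;
    pUser->GetPoseDetectionCap().StopPoseDetection(nId);
    pUser->GetSkeletonCap().RequestCalibration(nId, TRUE);
}

void XN_CALLBACK_TYPE PoseLost(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie)
{
}

void XN_CALLBACK_TYPE CalibrationStart(xn::SkeletonCapability& capability, XnUserID nId, void* pCookie)
{
}

void XN_CALLBACK_TYPE CalibrationEnd(xn::SkeletonCapability& capability, XnUserID nId, XnBool bSuccess, void* pCookie)
{
    // calibration succeeded: start tracking the user's skeleton
    xn::UserGenerator* pUser = (xn::UserGenerator*)pCookie;
    if (bSuccess) {
        pUser->GetSkeletonCap().StartTracking(nId);
    }
}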