Example #1
void HandDetect::handDetecting()
{
	skinDetect();
	IplImage *tmp = cvCreateImage(cvGetSize(backproject), 8, 1);
	cvZero(tmp);

	if(track_comp.rect.height>0&&track_comp.rect.width>0)
	{
		cvCircle(tmp, handCen, track_box.size.width, CV_RGB(255, 255, 255), -1);
		cvDrawRect(tmp, cvPoint(track_window.x-(int)(track_box.size.width*0.2), track_window.y-(int)(track_box.size.height*0.2)), 
			cvPoint(track_window.x+(int)(track_box.size.width*1.2), track_window.y+track_box.size.height), CV_RGB(255, 255, 255), -1);
		
	}
	cvAnd(backproject, tmp, backproject, 0);
	cvDilate(backproject, backproject, 0, 1);
	cvErode(backproject, backproject, 0, 1);
	
	UsingYCbCr();
	cvAnd(gray, tmp, gray, 0);
	cvErode(gray, gray, 0, 1);
	cvDilate(gray, gray, 0, 1);
//	cvShowImage("52", gray);
	cvReleaseImage(&tmp);

	cvOr(gray, backproject, backproject, 0);
	
	handCen=cvPoint(track_box.center.x, track_box.center.y);
	
	setRad();
//	cvDrawRect(image, cvPoint(track_window.x, track_window.y), cvPoint(track_window.x+track_window.width, track_window.y+track_window.height), CV_RGB(255, 0, 0));
	cvCircle(image, handCen, 2, CV_RGB(255, 0, 0), 2);

}
Example #2
// Morphological constrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTC(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp4 = cvCreateImage(cvGetSize(src), 8, 1);

	IplImage*  mask1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask4 = cvCreateImage(cvGetSize(src), 8, 1);

	cvZero(mask1);
	cvZero(mask2);
	cvZero(mask3);
	cvZero(mask4);

	cvZero(dst);

	//P107 (5.5)
	cvErode( src, temp1, sebg);
	cvDilate( src, temp2, sebg);
	cvErode( src, temp3, sefg);
	cvDilate( src, temp4, sefg);

	cvCmp(src, temp3, mask1, CV_CMP_EQ);
	cvCmp(temp2, src,  mask2, CV_CMP_LT);
	cvAnd(mask1, mask2, mask2);

	cvCmp(src, temp4, mask3 , CV_CMP_EQ);
	cvCmp(temp1, src, mask4 , CV_CMP_GT);
	cvAnd(mask3, mask4, mask4);

	cvSub(src, temp2, dst, mask2);
	cvSub(temp1, src, dst, mask4);




	cvReleaseImage(&mask1);
	cvReleaseImage(&mask2);
	cvReleaseImage(&mask3);
	cvReleaseImage(&mask4);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp4);

	cvReleaseStructuringElement(&sebg);

}
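A minimal usage sketch for lhMorpHMTC above (not from the original project): the custom 3x3 element pair below detects isolated bright pixels, with the foreground element covering only the center and the background element covering the eight neighbours. The input file name is a placeholder; note that lhMorpHMTC releases sebg itself, so the caller frees only sefg.

int fgValues[9] = { 0,0,0,  0,1,0,  0,0,0 };   // foreground: center pixel only
int bgValues[9] = { 1,1,1,  1,0,1,  1,1,1 };   // background: the 8 neighbours
IplConvKernel* sefg = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CUSTOM, fgValues);
IplConvKernel* sebg = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CUSTOM, bgValues);

IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder file
IplImage* dst = cvCreateImage(cvGetSize(src), 8, 1);

lhMorpHMTC(src, dst, sefg, sebg);   // non-zero pixels in dst mark hit-or-miss responses

cvReleaseImage(&dst);
cvReleaseImage(&src);
cvReleaseStructuringElement(&sefg); // sebg was already released inside lhMorpHMTC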
Example #3
void Image_OP::Dilate(int iter, IplImage* orig_img, IplImage* manipulated_img)
{
	if (iter > 0)
	{
		this->Reset_Manipulators();
		this->my_pic_manipulators.dilate = true;

		if (manipulated_img == NULL)
		{
			manipulated_img = cvCreateImage(cvSize(orig_img->width,
			                                       orig_img->height), IPL_DEPTH_8U, 3);

			// third parameter is the structuring element for the operation;
			// if NULL, a 3x3 rectangle is used; a custom element can be built
			// with cvCreateStructuringElementEx
			// iter (4th parameter) is the number of iterations applied
			cvDilate(orig_img, manipulated_img, NULL, iter);

			cvShowImage("dilate", manipulated_img);

			cvReleaseImage(&manipulated_img);
		}
		else
			cvDilate(orig_img, manipulated_img, NULL, iter);

		this->my_manipulation_applied = iter;
	}
}
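As the comment in Dilate above notes, passing NULL uses a 3x3 rectangle; here is a brief sketch of the custom-element path (not from the original class, the file name and the 5x5 cross element are assumptions):

IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);   // placeholder file
IplImage* out = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
IplConvKernel* cross5 = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_CROSS, NULL);

cvDilate(src, out, cross5, 2);          // two iterations with a 5x5 cross element

cvReleaseStructuringElement(&cross5);
cvReleaseImage(&out);
cvReleaseImage(&src);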
Example #4
void TextLocation::maxNumLimitedConnectComponet(const IplImage* img, const int maxNum,
        vector<CvRect> &rects, vector<CvPoint> &centers) {

    cvCopy(img, m_copyForCC);
    cvDilate(m_copyForCC, m_copyForCC, NULL, 2);

    // connected component
    int ccNum = 200;
    connectComponent(m_copyForCC, 1, kccPerimeter, &ccNum, rects, centers);

    // too many ccNum, dilate the cc img then cc again
    cvZero(m_maskForCC);
    cvCopy(img, m_copyForCC);

    while (ccNum > kboxMaxNum) {

        getMaskImgFromRects(m_copyForCC, rects, m_maskForCC);
        cvDilate(m_maskForCC, m_maskForCC, NULL, 2);

#ifdef DEBUG
        cvShowImage("maskImg", m_maskForCC);
#endif

        cvCopy(m_maskForCC, m_copyForCC);
        connectComponent(m_maskForCC, 1, kccPerimeter, &ccNum, rects, centers);

    }

}
Example #5
// Dilate -> Erode -> Dilate
void Filterling::noiseEraser(IplImage *srcImage, IplImage *dstImage) {
	int COL = 3, ROW = 3, ITERATIONS = 1;

	IplConvKernel *elem = cvCreateStructuringElementEx(COL, ROW, 0, 0, CV_SHAPE_RECT, NULL);
	//cvMorphologyEx(img, img, NULL, elem, CV_MOP_CLOSE, 1);	// closing
	cvDilate(srcImage, dstImage, elem, ITERATIONS);	// dilate
	cvErode(dstImage, dstImage, elem, ITERATIONS * 2);	// erode
	cvDilate(dstImage, dstImage, elem, ITERATIONS);	// dilate
}
Example #6
// Morphological geodesic dilation and reconstruction by dilation
void lhMorpRDilate(const IplImage* src, const IplImage* msk, IplImage* dst, IplConvKernel* se = NULL, int iterations=-1)
{

	assert(src != NULL && msk != NULL && dst != NULL && src != dst );

	if(iterations < 0)
	{
		// reconstruction by dilation
		cvMin(src, msk, dst);
		cvDilate(dst, dst, se);
		cvMin(dst, msk, dst);

		IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
		//IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);

		do
		{
			//record last result
			cvCopy(dst, temp1);
			cvDilate(dst, dst, se);
			cvMin(dst, msk, dst);
			//cvCmp(temp1, dst, temp2, CV_CMP_NE );

		}
		//while(cvSum(temp2).val[0] != 0);
		while(lhImageCmp(temp1, dst)!= 0);

		cvReleaseImage(&temp1);
		//cvReleaseImage(&temp2);

		return;	

	}
	else if (iterations == 0)
	{
		cvCopy(src, dst);
	}
	else
	{

		// plain geodesic dilation, p.136 (6.1)
		cvMin(src, msk, dst);
		cvDilate(dst, dst, se);
		cvMin(dst, msk, dst);

		for(int i=1; i<iterations; i++)
		{
			cvDilate(dst, dst, se);
			cvMin(dst, msk, dst);

		}

	}
}
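A minimal sketch of opening by reconstruction built on lhMorpRDilate above (not from the original project): a heavily eroded copy of the image serves as the marker, the original image as the mask, and iterations = -1 requests full reconstruction. The file name and the erosion depth are placeholder assumptions.

IplImage* src    = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder file
IplImage* marker = cvCreateImage(cvGetSize(src), 8, 1);
IplImage* dst    = cvCreateImage(cvGetSize(src), 8, 1);

cvErode(src, marker, NULL, 3);              // marker: eroded copy of the input
lhMorpRDilate(marker, src, dst, NULL, -1);  // reconstruct the marker under the original

// dst now holds the opening by reconstruction
cvReleaseImage(&marker);
cvReleaseImage(&src);
cvReleaseImage(&dst);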
Example #7
void CTransformImage::Morphology()
{
	if(!m_transImage)
		return;

	IplConvKernel* element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);

	cvDilate(m_transImage, m_transImage, element, 1);
	cvDilate(m_transImage, m_transImage, element, 1);
	cvErode (m_transImage, m_transImage, element, 1);
	cvErode (m_transImage, m_transImage, element, 1);

	cvReleaseStructuringElement(&element);
}
Example #8
CV_IMPL void
cvMorphologyEx( const void* src, void* dst,
                void* temp, IplConvKernel* element, int op, int iterations )
{
    CV_FUNCNAME( "cvMorphologyEx" );

    __BEGIN__;

    if( (op == CV_MOP_GRADIENT ||
        ((op == CV_MOP_TOPHAT || op == CV_MOP_BLACKHAT) && src == dst)) && temp == 0 )
        CV_ERROR( CV_HeaderIsNull, "temp image required" );

    if( temp == src || temp == dst )
        CV_ERROR( CV_HeaderIsNull, "temp image is equal to src or dst" );

    switch (op)
    {
    case CV_MOP_OPEN:
        CV_CALL( cvErode( src, dst, element, iterations ));
        CV_CALL( cvDilate( dst, dst, element, iterations ));
        break;
    case CV_MOP_CLOSE:
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvErode( dst, dst, element, iterations ));
        break;
    case CV_MOP_GRADIENT:
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvSub( dst, temp, dst ));
        break;
    case CV_MOP_TOPHAT:
        if( src != dst )
            temp = dst;
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( temp, temp, element, iterations ));
        CV_CALL( cvSub( src, temp, dst ));
        break;
    case CV_MOP_BLACKHAT:
        if( src != dst )
            temp = dst;
        CV_CALL( cvDilate( src, temp, element, iterations ));
        CV_CALL( cvErode( temp, temp, element, iterations ));
        CV_CALL( cvSub( temp, src, dst ));
        break;
    default:
        CV_ERROR( CV_StsBadArg, "unknown morphological operation" );
    }

    __END__;
}
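A minimal call-site sketch (not from the original source): morphological gradient is the one mode above that always needs the extra temp image, since it subtracts the erosion from the dilation. The file name and element size are placeholder assumptions.

IplImage* src  = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder file
IplImage* dst  = cvCreateImage(cvGetSize(src), 8, 1);
IplImage* temp = cvCreateImage(cvGetSize(src), 8, 1);
IplConvKernel* se = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

cvMorphologyEx(src, dst, temp, se, CV_MOP_GRADIENT, 1);  // dilate(src) - erode(src)

cvReleaseStructuringElement(&se);
cvReleaseImage(&temp);
cvReleaseImage(&dst);
cvReleaseImage(&src);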
Example #9
void CHandDrawEffect::EffectImage(IplImage* back, IplImage* frame, IplImage* alpha, IplImage* mask, IplImage* res)
{
	if(drawMode & 0x01) {
		// basic effect
		Posterize(0xD0, frame, imageA);
	//	DrawHatching(frame, imageA);
		cvAnd(imageA, mask, imageB); // cut out the CG region after the effect pass

		// outline
		cvNot(mask, imageA);
		cvDilate(imageA, imageD, 0, 1);
		cvDilate(mask, imageE, 0, 3);
		cvXor(imageE, imageD, mask);

		// update the alpha mask
		cvNot(mask, imageA);
		cvConvertScale(imageA, imageA, 0.5);
		cvOr(alpha, imageA, alpha);

		// colored outline
		cvNot(mask, imageA);
		cvAnd(imageA, imageC, imageA);
		cvOr(imageA, imageB, imageB);

		// scan lines
		cvAnd(imageB, scanningLine, imageB);

		// alpha blending
		AlphaBlend(back, imageB, alpha, res);

		if(0) { //drawMode & 0x02) {
		//	DrawEdge(frame, imageB, res, 2);

			cvNot(mask, frame);
			cvDilate(frame, imageA, 0, 1);
			cvDilate(mask, imageB, 0, 3);
			cvXor(imageA, imageB, mask);
			cvAnd(mask, res, res);

			// colored lines
			cvNot(mask, imageA);
			cvAnd(imageA, scanningLine, imageA);
			cvAnd(imageA, imageC, imageA);
			cvOr(res, imageA, res);
		}
	} else if(drawMode & 0x02) {
	//	DrawEdge(frame, imageB, res, 2);
	}
}
Example #10
void TargetDetector::filterForGreen(Image* input)
{
    // Remove top and bottom chunks if desired
    if (m_topRemovePercentage != 0)
    {
        int linesToRemove = (int)(m_topRemovePercentage * m_image->getHeight());
        size_t bytesToBlack = linesToRemove * m_image->getWidth() * 3;
        memset(m_image->getData(), 0, bytesToBlack);
    }

    if (m_bottomRemovePercentage != 0)
    {
        int linesToRemove = 
            (int)(m_bottomRemovePercentage * m_image->getHeight());
        size_t bytesToBlack = linesToRemove * m_image->getWidth() * 3;
        int startIdx = 
            m_image->getWidth() * m_image->getHeight() * 3 - bytesToBlack;
        memset(&(m_image->getData()[startIdx]), 0, bytesToBlack);
    }

    // Filter the image so all green is white, and everything else is black
    m_filter->filterImage(m_image);

    if (m_erodeIterations)
    {
        cvErode(m_image->asIplImage(), m_image->asIplImage(), 0, 
                m_erodeIterations);
    }
    if (m_dilateIterations)
    {
        cvDilate(m_image->asIplImage(), m_image->asIplImage(), 0, 
                 m_dilateIterations);
    }

}
Example #11
int openHand(PointCloud handPcl) {
	
	IplImage *imageHandBw = cvCreateImage(cvSize(640,480), 8, 1);
	
	binaryImage(handPcl, imageHandBw);
	cvDilate(imageHandBw, imageHandBw, NULL, 5);
	cvErode(imageHandBw, imageHandBw, NULL, 3);

	
	int perimeter = 0, area = 0;
	int i;
	for (i=0; i<FREENECT_FRAME_PIX; i++) {
		if(((unsigned char*)(imageHandBw->imageData))[i]) {
			area++;
			
			if (((unsigned char*)(imageHandBw->imageData))[i+1] == 0 ||
				((unsigned char*)(imageHandBw->imageData))[i-1] == 0 ||
				((unsigned char*)(imageHandBw->imageData))[i+FREENECT_FRAME_W] == 0 ||
				((unsigned char*)(imageHandBw->imageData))[i-FREENECT_FRAME_W] == 0)
				perimeter++;
		}
	}
	
	formFactor= 4 * M_PI * area / (perimeter * perimeter);
	
	//printf("PointCount: %d - Perimeter: %d - FF: %f\n", area, perimeter, formFactor);
	// cvShowImage("Depth", imageHandBw);

	cvReleaseImage(&imageHandBw);
	return (formFactor < OPEN_HAND_FORM_FACTOR);
	
}
Example #12
void Hand_recognition::Sub_prevFrame(IplImage *src, IplImage *dst){
	static int frame_count = 0;

	if(prev_ground == NULL){
		prev_ground = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		present_ground = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		cvCvtColor(src, prev_ground, CV_BGR2GRAY);
	}

	cvZero(dst);

	if(frame_count == 1){
		cvCvtColor(src, present_ground, CV_BGR2GRAY);
		for(int i = 0; i < src->width; i++){
			for(int j = 0; j < src->height; j++){
				int pixel_sub = abs((unsigned char)prev_ground->imageData[i + j * prev_ground->widthStep] - (unsigned char)present_ground->imageData[i + j * present_ground->widthStep]);
				if(pixel_sub > SUB_THRESHOLD)
					dst->imageData[i + j * dst->widthStep] = (unsigned char)255;
			}
		}

		//prev_ground = cvCloneImage(present_ground);
		cvCopy(present_ground, prev_ground);

		cvSmooth(dst, dst, CV_MEDIAN,3,3);
		cvErode(dst, dst, 0, 5);
		cvDilate(dst, dst, 0, 20);

		frame_count = 0;
	}

	frame_count++;
}
Example #13
File: main.c  Project: ntavish/tri
void findContours( IplImage* img, CvMemStorage* storage, CvSeq **contours)
{
    //for findContour function
    IplImage* timg  =NULL;
    IplImage* gray  =NULL;
    IplImage* tgray =NULL;

    CvSize sz = cvSize( img->width, img->height );

	// make a copy of input image
	gray = cvCreateImage( sz, img->depth, 1 );
	timg = cvCreateImage( sz, img->depth, 1 );
	tgray = cvCreateImage( sz, img->depth, 1 );

	cvSetImageCOI(img,1);
    cvCopy( img, timg,NULL );
	cvSetImageCOI(img,0);

    cvCopy( timg, tgray, 0 );

    cvCanny( tgray, gray, ct1, ct2, 5 );
    // holes between edge segments
    cvDilate( gray, gray, 0, 2 );

    cvFindContours( gray, storage, contours,
                    sizeof(CvContour),CV_RETR_LIST,
                    CV_CHAIN_APPROX_NONE, cvPoint(0,0) );

    //release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

}
Example #14
// Morphological unconstrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTU(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask = cvCreateImage(cvGetSize(src), 8, 1);
	cvZero(mask);

	//P106 (5.4)
	cvErode( src, temp, sefg);
	cvDilate(src, dst, sebg);
	cvCmp(temp, dst, mask, CV_CMP_GT);

	cvSub(temp, dst, dst, mask);
	cvNot(mask, mask);
	cvSet(dst, cvScalar(0), mask);

	//cvCopy(dst, mask);
	//cvSet(dst, cvScalar(255), mask);
	cvReleaseImage(&mask);
	cvReleaseImage(&temp);

	cvReleaseStructuringElement(&sebg);
}
Example #15
void opencv_image_filter(IplImage* src, IplImage* dst) {
    cvSobel(src, dst, 1, 0);
    cvSubS(dst, cvScalar(50,50,50), src);
    cvScale(src, dst, 2, 0);
    cvErode(dst, src);
    cvDilate(src, dst);
}
Example #16
void TextLocation::getGlobalThImg(const IplImage* src, double th, IplImage* globalThImg, int dilateDeg) {

    cvThreshold(src, globalThImg, th, 255, 0);
    inverseBinaryImage(m_globalThImg);
    cvDilate(m_globalThImg, m_globalThImg, NULL, dilateDeg);

}
Example #17
File: DIP1View.cpp  Project: jnulzl/DIP
/*
Function:
	OpenCV image dilation
*/
void CDIP1View::OnCvErode() 
{
	// TODO: Add your command handler code here
	if(alert(1))
 		return;
	//************************** main steps of OpenCV image processing *****************************

	// 0. determine whether the current image is color or grayscale
	int GrayOrColor;
	if(m_dib.IsGrade())
		GrayOrColor = 1;
	else
		GrayOrColor = 3;
	// 1. convert the current bmp image to an IplImage; 1 means the bmp is grayscale, 3 means it is color
	IplImage *temp = m_dib.cvBmpToIplImage(GrayOrColor);
	// 2. do the actual image processing
	cvDilate(temp, temp);
	// 3. copy the processed image data (normally the image created in step 2) back into the bmp data
	m_dib.cvDataToBmp(temp);
	// 4. release the IplImage created above
	cvReleaseImage(&temp);	// release temp

	// refresh the screen
 	Invalidate(1);
}
Example #18
int main(int argc, char** argv)
{
	IplImage *img = cvLoadImage("4un-zishiying.jpg", 0);
	if (img == NULL)
	{
		printf("img load failed!\n");
		return 0;
	}
	IplImage *img_erode = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage *img_dilate = cvCreateImage(cvGetSize(img), 8, 1);

	cvErode(img, img_erode, NULL, 1);   // erode
	cvDilate(img, img_dilate, NULL, 1); // dilate

	cvNamedWindow("img_erode");
	cvNamedWindow("img_dilate");

	cvShowImage("img_erode", img_erode);
	cvShowImage("img_dilate", img_dilate);

	cvWaitKey(-1);


	cvReleaseImage(&img_dilate);
	cvReleaseImage(&img_erode);

	cvDestroyAllWindows();


	return 0;
}
Example #19
void test_backremoval(){
	float max_value;
	float min_value;
	IplImage * rimage = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	IplImage * min_image = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	int values [] = {0,0,0,0,0,0,0,0,0};
	IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,values);
	cvDilate(image,rimage,kernel);
	display_image("dilate",rimage);
	cvErode(rimage,rimage,kernel);
	display_image("erode",rimage);
	
	//cvDilate(max_image,max_image,kernel);
	//cvDilate(max_image,max_image,kernel);
	display_image("original",image);
	display_image("rimage",rimage);

	//rimage = test_background_subtraction(image,max_image);
	//display_image("back removed",rimage);

	//IplImage * otsu_image = otsu_algorithm(rimage);
	//otsu_image = invert_image(otsu_image);
	//display_image("otsu image",otsu_image);

}
Example #20
void bn_closure(IplImage* img,int n)
{
	bn_reverse(img);
	cvDilate(img,img,NULL,n);
	cvErode(img,img,NULL,n);
	bn_reverse(img);
}
Example #21
void cvClose(CvArr *src, CvArr *dst, CvArr *mask, size_t n) {
	cvCopy(src, dst, mask);
	for (size_t i = 0; i < n; i++) {
		cvErode(dst, dst, NULL, 1);
		cvDilate(dst, dst, NULL, 1);
	}
}
Example #22
void test_min_max_edge_detection(){
	tester->start_timer();
	float max_value;
	float min_value;
	IplImage * max_image = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	IplImage * min_image = cvCreateImage(cvGetSize(image),image->depth,image->nChannels); 
	int values [] = {0,0,0,0,0,0,0,0,0};
	IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,values);
	cvErode(image,min_image,kernel);
	cvDilate(image,max_image,kernel);
	//display_image("erode",min_image);
	//display_image("dilate",max_image);

	int kernel_radius = 1;
	//test_get_normalization_parameters(image,min_image,max_image,kernel_radius,max_value,min_value);

	IplImage * rimage = cvCloneImage(image);
	for(int y=0;y<image->height;y++){

		for(int x=0;x<image->width;x++){
			//int value = get_pixel(image,y,x);
			//printf("%d \n",value);
			float enhanced_value = test_min_max_enhancement(y,x,kernel_radius,image,min_image,max_image); 
			//int new_value = 255 * enhanced_value/max_value;
			int new_value = enhanced_value;
			set_pixel(rimage,y,x,new_value);
		}
	}

	tester->stop_timer();
	display_image("result",rimage);
	cvSaveImage("images/edges.png",rimage);

}
Example #23
FkInt32S FkSilhDetector_PaintedFlies::denoiseChangeMask() 
{
    cvErode(m_rawChangeMask,m_denoisedChangeMask,NULL,1);
    cvDilate(m_denoisedChangeMask,m_denoisedChangeMask,NULL,1);
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* first_contour = NULL;

//    cvMorphologyEx( m_rawChangeMask, m_denoisedChangeMask, 0, 0, CV_MOP_CLOSE, 2);//CVCLOSE_ITR );

    int Nc = cvFindContours(m_denoisedChangeMask,storage,&first_contour,sizeof(CvContour),CV_RETR_EXTERNAL);//CV_RETR_LIST);

    cvZero(m_denoisedChangeMask);

    for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) 
    {
        double len = cvContourPerimeter( c );
        if (len>10) //to make sure we get rid of noises
        {
            cvDrawContours(m_denoisedChangeMask, c, cvScalar(255), cvScalar(255), -1, CV_FILLED, 8);
        }
    }

    cvReleaseMemStorage(&storage);
    return(FK_OK);

}
Example #24
void detect_object(IplImage *image, IplImage *pBkImg, IplImage *pFrImg, CvMat *pFrameMat, CvMat *pBkMat, CvMat *pFrMat,int thre_limit)
{
	nFrmNum++;
	cvCvtColor(image, pFrImg, CV_BGR2GRAY);
	cvConvert(pFrImg, pFrameMat);
	// Gaussian smoothing
	cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
	// subtract the background image from the current frame and take the absolute value
	cvAbsDiff(pFrameMat, pBkMat, pFrMat);
	// binarize the foreground image
	cvThreshold(pFrMat, pFrImg, thre_limit, 255.0, CV_THRESH_BINARY);

	/* morphological filtering */
	//IplConvKernel* element = cvCreateStructuringElementEx(2, 2, 0, 0, CV_SHAPE_RECT);
	//cvErode(pFrImg, pFrImg, element, 1);	// erode
	//delete element;

	//element = cvCreateStructuringElementEx(2, 2, 1, 1, CV_SHAPE_RECT);
	//cvDilate(pFrImg, pFrImg, element, 1);	// dilate
	//delete element;
	cvErode(pFrImg, pFrImg, 0, 1);	// erode
	cvDilate(pFrImg, pFrImg, 0, 1);	// dilate

	// update the background with a running average
	cvRunningAvg(pFrameMat, pBkMat, 0.004, 0);
	// convert the background matrix back to an image for display
	cvConvert(pBkMat, pBkImg);

	cvShowImage("background", pFrImg);
//	cvShowImage("background", pBkImg);
}
Example #25
int main(){
    IplImage *img= cvLoadImage("pic.jpg"); // load the image
    cvNamedWindow("Example1",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Example2",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Example3",CV_WINDOW_AUTOSIZE);

    cvShowImage("Example1",img);//在Example1显示图片
    //    cvCopy(img,temp);
    IplImage* temp=cvCreateImage( // create an 8-bit, 3-channel color image the same size as img
        cvGetSize(img),
        IPL_DEPTH_8U,
        3
        );

    cvErode(img,temp,0,1); // erode
    cvShowImage("Example2",temp);

    cvDilate(img,temp,0,1); // dilate
    cvShowImage("Example3",temp);


    cvWaitKey(0); // pause so the images stay on screen


    cvReleaseImage(&img); // release the memory held by img
    cvDestroyWindow("Example1");
    cvDestroyWindow("Example2");
    cvDestroyWindow("Example3");
    
    return 0;
}
Example #26
// calibration function to be run at the beginning only
vector<double> calibrate(){
	
	cvSmooth(frame, imageFiltree, CV_BLUR,seuilFiltre,seuilFiltre,0.0,0.0);
	cvCvtColor(imageFiltree, imageHSV,CV_BGR2HSV);
	cvInRangeS(imageHSV,cvScalar(hmin, smin, vmin, 0.0),cvScalar(hmax, smax, vmax, 0.0),imageBinaire);
	cvErode(imageBinaire, imageErodee, NULL, nbErosions);
	cvDilate(imageErodee, imageDilatee, NULL, nbDilatations);
	
	imageObjectRGB = multBinColor(imageDilatee, frame);
	imageObjectHSV = multBinColor(imageDilatee, imageHSV);
	
	vector<vector<CvPoint3D32f> > vecDistinctPoints = findPoint();
	
	// find the centroid of the object and trace it
	vector<CvPoint> centroid = centroiding(vecDistinctPoints);
	sort(centroid);
	
	vector<double> tanAlphaT = vector<double>(centroid.size(),0);
	double p;
	
	for (int i=0; i<centroid.size(); i++){
		p = abs(centroid[i].x - (frame->width / 2));
		tanAlphaT[i] = atan(d/D-p*ratioPixelSizeF);
	}
	return tanAlphaT;
}
Example #27
void MainWindow::BackgroundDiff()
{
    ui->alpha_slider->setEnabled(true);

    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
  //  IplImage* pFrame=NULL;
    nFrameNum=0;

    while(pFrame = cvQueryFrame( pCapture ))
    {
        nFrameNum++;
        // for the first frame, allocate memory and initialize

        if(nFrameNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),IPL_DEPTH_8U,1);
            pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
            pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

            // convert to a single-channel image before processing
            cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            cvConvert(pFrImg, pFrMat);
            cvConvert(pFrImg, pBkMat);
        }
        else
        {
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            // Gaussian smoothing first, to smooth the image
            cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
            // subtract the background from the current frame
            cvAbsDiff(pFrameMat, pBkMat, pFrMat);
            // binarize the foreground image
            cvDilate(pFrMat,pFrMat);
            cvErode(pFrMat,pFrMat);

            cvThreshold(pFrMat, pFrImg, lowThreshold, 255.0, CV_THRESH_BINARY);
            // update the background
            cvRunningAvg(pFrameMat, pBkMat, alpha,0);
            // convert the background to image format for display
            cvConvert(pBkMat, pBkImg);
            pFrame->origin = IPL_ORIGIN_BL;
            pFrImg->origin = IPL_ORIGIN_BL;
            pBkImg->origin = IPL_ORIGIN_BL;

        }

        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(pFrame,pBkImg,pFrImg);
    }

}
Example #28
void riduciNoise(IplImage *src,IplImage *dst)
{
    IplImage *buff = cvCreateImage(cvGetSize(src),8,dst->nChannels);
    cvDilate(src,buff,NULL,1);
    cvErode(buff,buff,NULL,2);
    cvSmooth(buff,dst,CV_GAUSSIAN,5);
    cvReleaseImage(&buff);
}
Example #29
void callback(int i)
{
	float time;
	clock_t t1, t2;
	
	// Start timer
	t1 = clock();
	
	// Filtering, HSV to Binary Image, Erosions and Dilations
	cvSmooth(frame, imageFiltree, CV_BLUR,seuilFiltre,seuilFiltre,0.0,0.0);
	cvCvtColor(imageFiltree, imageHSV,CV_BGR2HSV);
	cvInRangeS(imageHSV,cvScalar(hmin, smin, vmin, 0.0),cvScalar(hmax, smax, vmax, 0.0),imageBinaire);
	cvErode(imageBinaire, imageErodee, NULL, nbErosions);
	cvDilate(imageErodee, imageDilatee, NULL, nbDilatations);
	
	//imageDilateeFiltree =  lowPassFilter(imageDilatee); FILTER
	
	// multiplication between the original image in RGB and HSV and the binary image
	imageObjectRGB = multBinColor(imageDilatee, frame);
	imageObjectHSV = multBinColor(imageDilatee, imageHSV);
	
	// find the points and separate them (rows correspond to each point and the columns to the pixels belonging to the points)
	vector<vector<CvPoint3D32f> > vecDistinctPoints = findPoint();
	
	// find the centroid of the point and trace it
	vector<CvPoint> centroid = centroiding(vecDistinctPoints);
	// sort the centroids
	centroid = sort(centroid);
	
	// compute the distance with and without lens distortion
	vector<double> distance = findDistance(imageObjectHSV, centroid, tanAlphaT);
	
	// Contours
	/*cvFindContours( imageDilatee, storage, &contours, sizeof(CvContour),
	 CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0) );*/
	
	/*cvDrawContours( frame, contours,
	 CV_RGB(255,255,0), CV_RGB(0,255,0),
	 1, 2, 8, cvPoint(0,0));*/
	
	
	cvNamedWindow(myWindow, CV_WINDOW_AUTOSIZE);
	cvNamedWindow(myWindowObjectHSV, CV_WINDOW_AUTOSIZE);
	cvNamedWindow(myWindowObjectRGB, CV_WINDOW_AUTOSIZE);
	cvShowImage(myWindow, frame);
	cvShowImage(myWindowObjectHSV, imageObjectHSV);
	cvShowImage(myWindowObjectRGB, imageObjectRGB);
	//cvSaveImage("NoisyGridCentroiding.png", imageObjectRGB,0);
	
	// End timer
	t2 = clock();
	
	// Compute execution time
	time = (float)(t2 - t1) / CLOCKS_PER_SEC;
	
	cout << "execution time = " << time << " s" << endl;
	
}
Example #30
// callback function for open/close trackbar
void OpenClose(int pos)
{
    int n = open_close_pos - max_iters;
    int an = n > 0 ? n : -n;
    element = cvCreateStructuringElementEx( an*2+1, an*2+1, an, an, element_shape, 0 );
    if( n < 0 )
    {
        cvErode(src,dst,element,1);
        cvDilate(dst,dst,element,1);
    }
    else
    {
        cvDilate(src,dst,element,1);
        cvErode(dst,dst,element,1);
    }
    cvReleaseStructuringElement(&element);
    cvShowImage("Open/Close",dst);
}