//--------------------------------------------------------------------------------
// Binarize this grayscale image in place using a locally adaptive threshold.
// blockSize: neighbourhood side used for the local threshold (sanitized to an
//            odd value >= 3, as cvAdaptiveThreshold requires).
// offset:    constant subtracted from the local mean.
// invert:    true -> CV_THRESH_BINARY_INV output polarity.
// gauss:     true -> Gaussian-weighted neighbourhood instead of a plain mean.
void ofxCvGrayscaleImage::adaptiveThreshold( int blockSize, int offset, bool invert, bool gauss) {
	// Guard: nothing to do on an unallocated image.
	if( !bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "adaptiveThreshold(): image not allocated";
		return;
	}

	// cvAdaptiveThreshold rejects block sizes that are < 3 or even;
	// repair the caller-supplied value instead of failing.
	if( blockSize < 2 ) {
		ofLogNotice("ofxCvGrayscaleImage") << "adaptiveThreshold(): block size " << blockSize << " < minimum, setting to 3";
		blockSize = 3;
	}
	if( blockSize % 2 == 0 ) {
		ofLogNotice("ofxCvGrayscaleImage") << "adaptiveThreshold(): block size " << blockSize << " not odd, adding 1";
		blockSize++;
	}

	// Pick polarity and neighbourhood weighting up front.
	const int thresholdType  = invert ? CV_THRESH_BINARY_INV : CV_THRESH_BINARY;
	const int adaptiveMethod = gauss  ? CV_ADAPTIVE_THRESH_GAUSSIAN_C : CV_ADAPTIVE_THRESH_MEAN_C;

	// Threshold into the scratch buffer, then promote it to the current image.
	cvAdaptiveThreshold( cvImage, cvImageTemp, 255, adaptiveMethod,
	                     thresholdType, blockSize, offset );
	swapTemp();
	flagImageChanged();
}
Exemple #2
0
int main( int argc, char** argv )
{


	if((Igray = cvLoadImage(argv[1],CV_LOAD_IMAGE_GRAYSCALE))==0)
	{
		return -1;
	}

	Iat = cvCreateImage(cvSize(Igray->width,Igray->height),IPL_DEPTH_8U,1);
	It= cvCreateImage(cvSize(Igray->width,Igray->height),IPL_DEPTH_8U,1);
	cvAdaptiveThreshold(Igray,Iat,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,25,11);



	

	cvNamedWindow("Adaptive Threshold",1);

	cvShowImage("Adaptive Threshold", Iat);
	cvSaveImage("numberplate_Threshold.jpg",Iat);
	cvWaitKey(0);
	cvDestroyWindow("Adaptive Threshold");

	return 0;
}
//--------------------------------------------------------------------------------
// Binarize this grayscale image in place with a locally adaptive threshold.
// blockSize: side of the pixel neighbourhood used for the local threshold
//            (forced to an odd value >= 3 below, as cvAdaptiveThreshold
//            requires).
// offset:    constant subtracted from the neighbourhood mean.
// invert:    if true produce an inverted binary image (CV_THRESH_BINARY_INV).
// gauss:     if true weight the neighbourhood with a Gaussian instead of a
//            plain mean.
void ofxCvGrayscaleImage::adaptiveThreshold( int blockSize, int offset, bool invert, bool gauss) {
	if( !bAllocated ){
		ofLog(OF_LOG_ERROR, "in adaptiveThreshold, image is not allocated");		
		return;	
	}

    // cvAdaptiveThreshold needs an odd block size of at least 3; repair
    // out-of-range values rather than letting the call fail.
    if( blockSize < 2 ) {
        ofLog(OF_LOG_NOTICE, "in adaptiveThreshold, value < 2, will make it 3");
        blockSize = 3;
    }

    if( blockSize % 2 == 0 ) {
        ofLog(OF_LOG_NOTICE, "in adaptiveThreshold, value not odd -> will add 1 to cover your back");
        blockSize++;
    }

    int threshold_type = CV_THRESH_BINARY;
    if(invert) threshold_type = CV_THRESH_BINARY_INV;

    int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
    if(gauss) adaptive_method = CV_ADAPTIVE_THRESH_GAUSSIAN_C;

    // Threshold into the scratch buffer, then swap it in as the current
    // image and mark the pixels dirty.
    cvAdaptiveThreshold( cvImage, cvImageTemp, 255, adaptive_method,
                         threshold_type, blockSize, offset);
   swapTemp();
   flagImageChanged();
}
Exemple #4
0
int main( int argc, char** argv )
{
IplImage *Igray=0, *Iat,*Iat2;
/*
==============================================================
==============================================================
		Adaptive Threshold Testing

Output Image Iat is the adaptive threshold output of the input
image. Selected when the code is run.
==============================================================
==============================================================
*/
/*
int x =180,y =200;
ptr2.val[0] = 255;
ptr2.val[1] = 0;
ptr2.val[2] = 0; */
if((Igray = cvLoadImage("bob.jpg",1))==0)
{
	return -1;
}
Iat = cvCreateImage(cvSize(Igray->width,Igray->height),IPL_DEPTH_8U,1);
//Iat2 = cvCloneImage(Iat);

cvAdaptiveThreshold(Igray,Iat,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY_INV,100,21);

//cvSet2D(Igray,x,y,ptr2);





/*
==============================================================
==============================================================
		Clean Memory and alles
==============================================================
==============================================================
*/
 cvWaitKey(0);
cvNamedWindow("Adaptive Threshold",1);
cvShowImage("Adaptive Threshold", Igray);

cvSaveImage("testAdaptive.jpg",Iat);

/*
==============================================================
==============================================================
==============================================================
==============================================================
*/


cvDestroyWindow("Adaptive Threshold");
return(0);
}
// Load the image at `path` as 8-bit grayscale, apply both a fixed global
// threshold (cvThreshold) and a Gaussian adaptive threshold
// (cvAdaptiveThreshold), and display input and both results in three
// windows until a key is pressed.  Returns 0 on success, -1 if the image
// cannot be loaded.
int adaptiveThreshold(char*path)
{
	IplImage *Igray = 0, *It = 0, *Iat;

	/*if( argc != 7 )
	{
		return -1;
	}*/
	
	// Command-line parsing kept for reference; fixed values are used below.
	//double threshold = (double)atof( argv[1] ); //convert string to double
	//int threshold_type = atoi( argv[2] ) ? CV_THRESH_BINARY : CV_THRESH_BINARY_INV;
	//int adaptive_method = atoi( argv[3] ) ? CV_ADAPTIVE_THRESH_MEAN_C : CV_ADAPTIVE_THRESH_GAUSSIAN_C;
	//int block_size = atoi( argv[4] );
	//double offset = (double)atof( argv[5] );

	// These two parameters configure cvThreshold().
	double threshold =  70;
	int threshold_type =CV_THRESH_BINARY;

    // These three parameters configure cvAdaptiveThreshold().
	int adaptive_method = CV_ADAPTIVE_THRESH_GAUSSIAN_C;
	int block_size = 3;
	double offset = 15.0;
	
	// Load the image as grayscale.
	if( ( Igray = cvLoadImage(path, CV_LOAD_IMAGE_GRAYSCALE ) ) == 0 ){
		return -1;
	}
	
	// Create same-size 8-bit single-channel images for the two outputs.
	It = cvCreateImage( cvSize( Igray -> width, Igray -> height ), IPL_DEPTH_8U, 1 ); // single-channel 8-bit grayscale
	Iat = cvCreateImage( cvSize( Igray -> width, Igray -> height ), IPL_DEPTH_8U, 1 );
	
	// Thresholding.  Note the adaptive call deliberately uses
	// CV_THRESH_BINARY_INV, not the `threshold_type` variable above.
	cvThreshold( Igray, It, threshold, 255, threshold_type );
	cvAdaptiveThreshold( Igray, Iat, 255, adaptive_method, CV_THRESH_BINARY_INV, block_size, offset );
	
	// Named windows for output.
	cvNamedWindow( "Raw", 1 );
	cvNamedWindow( "Threshold", 1 );
	cvNamedWindow( "Adaptive Threshold", 1 );
	cvShowImage( "Raw", Igray );
	cvShowImage( "Threshold", It );
	cvShowImage( "Adaptive Threshold", Iat );
	cvWaitKey(0);
	
	// Reclaim memory and tear down the windows.
	cvReleaseImage( &Igray );
	cvReleaseImage( &It );
	cvReleaseImage( &Iat );
	cvDestroyWindow( "Raw" );
	cvDestroyWindow( "Threshold" );
	cvDestroyWindow( "Adaptive Threshold" );

	return 0;
}
// Run cvAdaptiveThreshold (max value 255, library-default method, threshold
// type and block size) from `input` into `output`.  Silently returns when
// either image is missing, is not an OpenCVImage, or wraps no IplImage.
void OpenCVOperations::adaptiveThreshold(SLImage * input, SLImage * output) {
	if (!input || !output)
		return;
	// Both images must actually be OpenCV-backed and carry pixel data.
	OpenCVImage * src = dynamic_cast<OpenCVImage *>(input);
	if (!src || !src->getIplImage())
		return;
	OpenCVImage * dst = dynamic_cast<OpenCVImage *>(output);
	if (!dst || !dst->getIplImage())
		return;
	cvAdaptiveThreshold(src->getIplImage(), dst->getIplImage(), 255.);
}
// Return a new image containing the mean-adaptive binarization of this
// image.  `degree` is the neighbourhood (block) size used for the local
// mean; cvAdaptiveThreshold requires it to be odd and >= 3, so invalid
// values are repaired instead of aborting the call.
// The caller owns the returned Image.
Image* Image::threshold(int degree=3) {
  // Sanitize the block size: must be an odd value >= 3.
  if (degree < 3) {
    degree = 3;
  }
  if (degree % 2 == 0) {
    degree += 1;
  }
  Image* other = this -> cloneEmpty();
  cvAdaptiveThreshold(p_img,
                      other -> toIplImage(),
                      255, //double maxValue,
                      CV_ADAPTIVE_THRESH_MEAN_C,
                      CV_THRESH_BINARY,
                      degree,
                      0);
  return other;
}
// MFC command handler: binarize the working image in place with a
// Gaussian-weighted adaptive threshold (3x3 neighbourhood, offset 5,
// inverted binary output), then trigger a repaint.
void COpenCVMFCView::OnAdaptiveThreshold()
{
	// TODO: Add your command handler code here

	// NOTE(review): src and dst are the same image here, and
	// cvAdaptiveThreshold requires an 8-bit single-channel image --
	// confirm workImg is grayscale at this point and that in-place
	// operation is intended.
	cvAdaptiveThreshold(workImg,workImg,255,
		CV_ADAPTIVE_THRESH_GAUSSIAN_C,
		CV_THRESH_BINARY_INV,3,5);

	// Presumably invalidates the cached image-type classification so it is
	// re-detected on next use -- verify against the rest of the view class.
	m_ImageType = -1;
	
	Invalidate();
}
Exemple #9
0
int main(int argc, char* argv[])
{
  IplImage *img = cvLoadImage("test.png", 1);

 // cvSetImageROI(img, cvRect(100, 100, 800, 500));

  cvSmooth(img, img, 3, 7);

  IplImage *grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
  cvCvtColor(img, grey, CV_BGR2GRAY);
  cvAdaptiveThreshold(grey, grey, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, 0, 19, 2.0); 

  IplImage *labelImg = cvCreateImage( (cvGetSize(grey)),32,1);

  CvSeq* lb;


  CvMemStorage * ms = cvCreateMemStorage();
  int res = cvBlobDLabel(grey, labelImg, ms, &lb, 400, (CVBLOBDLL_GETCONTOUR | CVBLOBDLL_GETHOLESCONT | CVBLOBDLL_GETMEANCOLOR), img);
  
    CvBlobObj*  elBlob;
    int i;

	for ( i = 0; i < lb->total; i++)
	{
		elBlob = (CvBlobObj*) cvGetSeqElem((CvSeq*) lb, i);
		// extract and draws blob contour
		if (elBlob->contour) {
			cvDrawContours(img, elBlob->contour, CV_RGB(255,0,0), CV_RGB(255,255,0), 1, 3);


			//CvSeq* hull = cvConvexHull2(elBlob->contour, ms, CV_CLOCKWISE, 1);
			//cvDrawContours(img, hull, CV_RGB(0,0,255), CV_RGB(0,255,255), 1, 3);
		};
		// extract and draws every blob internal hole contour
		if (elBlob->internalContours) {
			CvSeq* seqCont = elBlob->internalContours;
			while ( seqCont ) 
			{
			    cvDrawContours(img, seqCont, CV_RGB(0,255,0), CV_RGB(0,0,0), 1, 1);
				seqCont = seqCont->h_next;
			}
		};
		cvCircle(img, cvPoint((int)(elBlob->centroid.x),(int)(elBlob->centroid.y)), 2, CV_RGB(0,0,255), 2); 
	};

	cvBlobDRelease();
	cvReleaseMemStorage(&ms);

    cvResetImageROI(img);
	cvSaveImage("imgout.png", img);
	return 0;
}
Exemple #10
0
int main (int argc, const char * argv[])
{
	if ( argc != 2 )
	{
		fprintf(stderr, "Usage: <image>\n");
		exit(1);
	}
	
	IplImage* image = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	
	cvNamedWindow("main", CV_WINDOW_NORMAL);
	cvShowImage("main", image);
	cvWaitKey(0);
	
	IplImage* dst = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
	
	const char* thresholdTypeName[] = { "CV_THRESH_BINARY", "CV_THRESH_BINARY_INV", "CV_THRESH_TRUNC", "CV_THRESH_TOZERO_INV", "CV_THRESH_TOZERO" };
	const int thresholdType[] = { CV_THRESH_BINARY, CV_THRESH_BINARY_INV, CV_THRESH_TRUNC, CV_THRESH_TOZERO_INV, CV_THRESH_TOZERO };
	
	for(int i = 0; i < sizeof(thresholdType)/sizeof(thresholdType[0]); ++i)
	{
		printf("Threshold Type %s\n", thresholdTypeName[i]);
		cvThreshold(image, dst, 128, 255, thresholdType[i]);
		cvShowImage("main", dst);
		cvWaitKey(0);
	}
	
	// part a and b
	const char* adaptiveThresholdTypeName[] = { "CV_THRESH_BINARY", "CV_THRESH_BINARY_INV" };
	const int adaptiveThresholdType[] = { CV_THRESH_BINARY, CV_THRESH_BINARY_INV };
	const int adaptiveMethod[] = { CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C };
	const char* adaptiveMethodName[] = { "CV_ADAPTIVE_THRESH_MEAN_C", "CV_ADAPTIVE_THRESH_GAUSSIAN_C" };
	const int param1Values[]	 = { 5, 0, -5 };
	
	for(int param1ValuesIndex = 0; param1ValuesIndex < sizeof(param1Values)/sizeof(param1Values[0]); param1ValuesIndex++)
	{
		int param1 = param1Values[param1ValuesIndex];
		
		for(int i = 0; i < sizeof(adaptiveThresholdType)/sizeof(adaptiveThresholdType[0]); ++i)
		{
			for(int j = 0; j < sizeof(adaptiveMethod)/sizeof(adaptiveMethod[0]); j++)
			{
				printf("Adaptive threshold type %s, method %s, param1 %d\n", adaptiveThresholdTypeName[i], adaptiveMethodName[j], param1);
				cvAdaptiveThreshold(image, dst, 255, adaptiveMethod[j], adaptiveThresholdType[i], 3, param1);
				cvShowImage("main", dst);
				cvWaitKey(0);
			}
		}
	}
	
    return 0;
}
// Usage: <threshold> <thr-type-flag> <adaptive-method-flag>
//        <block-size> <offset> <image>
// Loads the image as grayscale and shows the raw image next to a fixed
// global threshold and an adaptive threshold of it.
// (Igray, It, Iat are file-scope globals declared elsewhere.)
int main( int argc, char** argv )
{
    if (argc != 7) { return -1; }

    // Command line.
    double threshold = (double) atof(argv[1]);
    int threshold_type = atoi(argv[2]) ?
             CV_THRESH_BINARY : CV_THRESH_BINARY_INV;
    int adaptive_method = atoi(argv[3]) ?
             CV_ADAPTIVE_THRESH_MEAN_C : CV_ADAPTIVE_THRESH_GAUSSIAN_C;
    int block_size = atoi(argv[4]);
    double offset = (double) atof(argv[5]);

    // Fixed: cvAdaptiveThreshold requires an odd block size >= 3; sanitize
    // the user-supplied value instead of letting the call fail.
    if (block_size < 3) { block_size = 3; }
    if (block_size % 2 == 0) { block_size += 1; }

    // Read in gray image.
    if ( ( Igray = cvLoadImage(argv[6], CV_LOAD_IMAGE_GRAYSCALE) ) == 0 ) {
        return -1;
    }

    // Create the grayscale output images.
    It = cvCreateImage(cvSize(Igray->width,Igray->height),
                         IPL_DEPTH_8U, 1);
    Iat = cvCreateImage(cvSize(Igray->width,Igray->height),
                         IPL_DEPTH_8U, 1);

    // Threshold: global and adaptive, same polarity.
    cvThreshold(Igray, It, threshold, 255, threshold_type);
    cvAdaptiveThreshold(Igray, Iat, 255, adaptive_method,
                        threshold_type, block_size, offset);

    // PUT UP 2 WINDOWS.
    cvNamedWindow("Raw", 1);
    cvNamedWindow("Threshold", 1);
    cvNamedWindow("Adaptive Threshold", 1);

    // Show the results.
    cvShowImage("Raw", Igray);
    cvShowImage("Threshold", It);
    cvShowImage("Adaptive Threshold", Iat);
    cvWaitKey(0);

    // Clean up.
    cvReleaseImage(&Igray);
    cvReleaseImage(&It);
    cvReleaseImage(&Iat);
    cvDestroyWindow("Raw");
    cvDestroyWindow("Threshold");
    cvDestroyWindow("Adaptive Threshold");
    return(0);
}
// Load the image at fileName as grayscale, binarize it with a mean-based
// adaptive threshold, remove tiny contour specks (area < 2) in two passes,
// run ClearNoise and write the result back over fileName.
void COpenCVCheck::OpenCVBinary(CString fileName)
{
	CvScalar colors[] = {{255,255,255},{0,0,0}};
	IplImage* pImg; // IplImage pointer for the loaded picture
	if((pImg = cvLoadImage(fileName, 0)) != 0)
	{
		IplImage* dst = NULL;
		dst=cvCreateImage(cvSize(pImg->width,pImg->height),IPL_DEPTH_8U,1);
		//cvThreshold(pImg,dst,185,255,CV_THRESH_BINARY);
		cvAdaptiveThreshold(pImg,dst,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,5,3); // binarize

		ReverseColor(dst);
		for (int kk = 0;kk<2;kk++)   // de-noise: two passes of speck removal
		{
			CvSeq *contours;
			CvMemStorage* storage = cvCreateMemStorage(0);

			cvFindContours( dst, storage, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
			// cvFindContours treats black as the background colour
			while(contours)
			{
				//approximate contour with accuracy proportional
				CvSeq* result = cvApproxPoly( contours, sizeof(CvContour),
					storage,CV_POLY_APPROX_DP, 3, 1);
				//to filter the noisy contour
				if(fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 2)
				{
					// Blank every vertex of the tiny contour.
					// Fixed: the sequence elements are CvPoint; the original
					// cast them to CvRect*, which only worked because x/y
					// happen to be the first members of both structs.
					for(int  i = 0; i < (result ? result->total : 0); i++)
					{
						CvPoint* p = (CvPoint*)cvGetSeqElem(result,i);
						cvSet2D(dst,p->y,p->x,colors[1]);
					}
				}
				contours = contours->h_next;
			}
			// Fixed: the storage (owning all contour sequences of this pass)
			// was leaked on every loop iteration in the original.
			cvReleaseMemStorage(&storage);
		}
		ReverseColor(dst);
		ClearNoise(dst);
		cvSaveImage(fileName,dst);
		cvReleaseImage(&dst);
		cvReleaseImage(&pImg);
	}
}
Exemple #13
0
// Build a cleaned binary outline image from `src`:
//  - Gaussian adaptive threshold (7x7 block, offset 1);
//  - optionally (mask == 1) OR-combined with an inverted Otsu threshold;
//  - opened (r=3), closed (r=9), then eroded+dilated (r=7) with elliptical
//    structuring elements; finally contoursDrawBorder() is applied.
// Returns a newly created image owned by the caller.
// NOTE(review): `temp` (the morphology scratch image) is released before
// returning -- callers must not reuse it after this call.
IplImage *contoursGetOutlineMorh(IplImage *src, IplImage *temp, int mask)
{
    int radius = 3;
    int cols = radius * 2 + 1;
    int rows = cols;
    IplImage *res;
    IplImage *bin  = cvCreateImage(cvGetSize(src), src->depth, 1);

    cvAdaptiveThreshold(src, bin, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 1);

    if (mask == 1) {
        // Combine the adaptive threshold with a global inverted Otsu mask.
        // (local renamed from `mask`, which shadowed the int parameter)
        IplImage *otsuMask = cvCreateImage(cvGetSize(src), src->depth, 1);
        res = cvCreateImage(cvGetSize(src), src->depth, 1);
        cvThreshold(src, otsuMask, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
        cvOr(bin, otsuMask, res, NULL);

        cvReleaseImage(&otsuMask);
        cvReleaseImage(&bin);  // fixed: `bin` leaked in this branch
    } else {
        res = bin;
    }

    IplConvKernel *element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);

    // Opening removes small white specks.
    cvMorphologyEx(res, res, temp, element, CV_MOP_OPEN, 1);
    cvReleaseStructuringElement(&element);

    // Closing with a larger kernel fills small gaps.
    radius = 9;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_CLOSE, 1);
    cvReleaseStructuringElement(&element);

    // Erode then dilate with a medium kernel to smooth the outline.
    radius = 7;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvErode(res, res, element, 1);
    cvDilate(res, res, element, 1);

    contoursDrawBorder(res);

    cvReleaseStructuringElement(&element);
    cvReleaseImage(&temp);

    return res;
}
Exemple #14
0
// Count candidate character glyphs on a licence-plate image: the plate is
// resized to 408x70, converted to grayscale, adaptively thresholded, and a
// contour is counted (and outlined on the resized image) when its bounding
// box has a plausible character size.  Returns the number of matches.
int PlateFinder::CountCharacter(IplImage *plate) {
	int cnt = 0;
	IplImage *resizeImg, *binaryImg;

	resizeImg = cvCreateImage (cvSize(408, 70), IPL_DEPTH_8U, 3);
	binaryImg = cvCreateImage (cvSize(408, 70), IPL_DEPTH_8U,  1);

	cvResize(plate, resizeImg);
	cvCvtColor(resizeImg, binaryImg, CV_RGB2GRAY);
	cvAdaptiveThreshold(binaryImg, binaryImg, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 13, 2);

	//cvShowImage("binaryImg", binaryImg);

	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), storage);
	cvFindContours(binaryImg, storage, &contours);

	// A contour counts as a character when its bounding box has a
	// character-like width/height/area for the normalized 408x70 plate.
	while (contours) {
		CvRect rect = cvBoundingRect(contours);

		if (rect.width > 15 && rect.width < 50
			&& rect.height > 40 && rect.height < 65
			&& rect.width * rect.height > 1000) 
		{
			cvRectangle (resizeImg, cvPoint(rect.x, rect.y), 
				cvPoint(rect.x + rect.width, rect.y + rect.height), GREEN, 2);

			cnt++;
		}
		contours = contours->h_next;
	}

	// Fixed: both temporaries and the contour storage were leaked in the
	// original.
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&binaryImg);
	cvReleaseImage(&resizeImg);

	return cnt;
}
// Show `src` thresholded adaptively (local-mean method, 3x3 block, constant
// `param1`) once for each of the two entries in the file-level
// threshold_type[] table, opening one titled window per variant.
// NOTE(review): `dst` is not released here; if show_image() does not take
// ownership, one image per variant leaks -- verify before changing.
void do_adaptive_threshold(IplImage* src, double param1)
{
    for (int i = 0; i < 2; ++i) {

        IplImage* dst = cvCreateImage( cvGetSize(src), src->depth, src->nChannels );

        cvAdaptiveThreshold(
            src,
            dst,
            255,                        // max_val
            CV_ADAPTIVE_THRESH_MEAN_C,  // adaptive method
            threshold_type[i],          // threshold type
            3,                          // block size
            param1                      // a certain constant
        );

        // Fixed: the original sprintf'd into a 50-byte buffer; a long type
        // name such as "CV_THRESH_BINARY_INV" plus the %f expansion can
        // exceed 49 characters and overflow it.  Use a bounded snprintf
        // with a comfortably sized buffer instead.
        char title[128];
        snprintf(title, sizeof(title), "Adaptive threshold, %s, %f", threshold_type_names[i], param1);
        show_image(dst, title);

    }
}
Exemple #16
0
/*!
 * @function adaptiveThresh
 * @discussion Perform adaptive thresholding.
 * @updated 2011-4-15
 */
// Convert the captured RGB frame to grayscale, adaptively threshold it
// (mean method, 173-pixel block, offset 1), convert back into the global
// RGBA buffer `convertedImage` and return its raw pixel data pointer.
char* adaptiveThreshold(IplImage* frameImage)
{
    IplImage *grayTex;
    IplImage *grayTemp;
    
    // Pick working-buffer dimensions from the global capture size.
    // NOTE(review): there is no default case -- if captureSize matches none
    // of these labels, grayTex/grayTemp are used uninitialized below.
    switch(captureSize)
    {
        case(SMALL_BACK):
        case(SMALL_FRONT):
            grayTex = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 1);
            grayTemp = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 1);
            
            break;
        case(MEDIUM_BACK):
        case(LARGE_FRONT):
        case(MEDIUM_FRONT):
            grayTex = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
            grayTemp = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
            
            break;
        case(LARGE_BACK):
            grayTex = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 1);
            grayTemp = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 1);
            
            break;
    }
    
 
    cvCvtColor(frameImage, grayTex, CV_RGB2GRAY);
    
    int type =  CV_THRESH_BINARY;           //CV_THRESH_BINARY_INV; 
    int method = CV_ADAPTIVE_THRESH_MEAN_C; //CV_ADAPTIVE_THRESH_GAUSSIAN_C; 
    
    // Large odd block size: threshold against a very wide local mean.
    int blockSize = 173; 
    double offset = 1; 
    
    cvAdaptiveThreshold(grayTex, grayTemp, 255, method, type, blockSize, offset); 
    
    // Allocate the global output buffer (4 channels).
    // NOTE(review): any previous convertedImage is not released before being
    // overwritten -- this leaks on repeated calls unless it is freed
    // elsewhere; verify ownership.
    switch(captureSize)
    {
        case(SMALL_BACK):
        case(SMALL_FRONT):
            convertedImage = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 4);
            break;
        case(MEDIUM_BACK):
        case(LARGE_FRONT):
        case(MEDIUM_FRONT):
            convertedImage = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 4);
            break;
        case(LARGE_BACK):
            convertedImage = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 4);
            break;
    }
    
    // NOTE(review): convertedImage has 4 channels but CV_GRAY2RGB produces a
    // 3-channel result; CV_GRAY2RGBA may be intended here -- confirm against
    // the consumer of the returned buffer.
    cvCvtColor(grayTemp, convertedImage, CV_GRAY2RGB);
    
    cvReleaseImage(&grayTex);
    cvReleaseImage(&grayTemp);
    
    return convertedImage->imageDataOrigin;
}
Exemple #17
0
// Convert a BGR image to a denoised binary image: grayscale conversion,
// inverted mean-adaptive threshold whose block size tracks the image width
// and whose constant tracks the overall mean intensity, then removal of
// isolated "ink" (white) pixels that have too few white neighbours.
// Returns a newly created image; the caller must release it.
static IplImage *_threshold(IplImage *in) {
    IplImage *img = cvCreateImage(cvGetSize(in), 8, 1);

    // convert to grayscale
    cvCvtColor(in, img, CV_BGR2GRAY);

    // compute the mean intensity. This is used to adjust constant_reduction value below.
    // (cvGet2D per pixel is slow but acceptable for this offline use.)
    long total = 0;
    for (int x = 0; x < img->width; ++x) {
        for (int y = 0; y < img->height; ++y) {
            CvScalar s = cvGet2D(img, y, x);
            total += s.val[0];
        }
    }
    int mean_intensity = (int)(total / (img->width * img->height));

    // apply thresholding (converts it to a binary image)
    // block_size observations: higher value does better for images with variable lighting (e.g.
    //   shadows).
    // may eventually need to paramaterize this, to some extent, because the different callers
    //   seem to do better with different values (e.g. contour location is better with smaller numbers,
    //   but cage location is better with larger...) but for now, have been able to settle on value
    //   which works pretty well for most cases.
    int block_size = (int)(img->width / 9);
    if ((block_size % 2) == 0) {
        // must be odd
        block_size += 1;
    }
    // constant_reduction observations: magic, but adapting this value to the mean intensity of the
    //   image as a whole seems to help.
    int constant_reduction = (int)(mean_intensity / 3.6 + 0.5);

    IplImage *threshold_image = cvCreateImage(cvGetSize(img), 8, 1);
    cvAdaptiveThreshold(img, threshold_image, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV,
        block_size, constant_reduction);
    cvReleaseImage(&img);

    // try to get rid of "noise" spots.
    // A white pixel survives only if strictly more than min_blob_size of its
    // (up to 8) in-bounds neighbours are also white; otherwise it is cleared.
    int min_blob_size = 2;
    for (int x = 0; x < threshold_image->width; ++x) {
        for (int y = 0; y < threshold_image->height; ++y) {
            CvScalar s = cvGet2D(threshold_image, y, x);
            int ink_neighbors = 0;
            if (s.val[0] == 255) {
                // Scan the 3x3 neighbourhood minus the center, bailing out
                // early once enough ink neighbours have been seen.
                for (int dx = -1; dx <= 1; ++dx) {
                    if ((x + dx >= 0) && (x + dx < threshold_image->width)) {
                        for (int dy = -1; dy <= 1; ++dy) {
                            if ((y + dy >= 0) && (y + dy < threshold_image->height)) {
                                if (! ((dy == 0) && (dx == 0))) {
                                    CvScalar m = cvGet2D(threshold_image, y + dy, x + dx);
                                    if (m.val[0] == 255) {
                                        ++ink_neighbors;
                                        if (ink_neighbors > min_blob_size) {
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if (ink_neighbors > min_blob_size) {
                            break;
                        }
                    }
                }
                if (ink_neighbors <= min_blob_size) {
                    s.val[0] = 0;
                    cvSet2D(threshold_image, y, x, s);
                }
            }
        }
    }

    return threshold_image;
}
// Legacy 'trs' framework regression test for cvAdaptiveThreshold.
// Builds a synthetic image containing a noisy elliptical polygon over a
// noisy background, together with the expected CV_THRESH_BINARY and
// CV_THRESH_BINARY_INV reference images, runs the mean-C adaptive threshold
// both ways, and compares each result against its reference with a
// tolerance of 5 differing pixels.  Returns TRS_OK / TRS_FAIL / TRS_UNDEF.
static int  aAdaptThreshold()
{

    CvPoint *cp;
    int parameter1 = 3;    // adaptive block size
    double parameter2 = 10; // offset constant
    int width = 128;
    int height = 128;
    int kp = 5;
    int nPoints2 = 20;

    int fi = 0;
    int a2 = 20;
    int b2 = 25,xc,yc;

    double pi = 3.1415926;

    double lower, upper;
    unsigned seed;
    char rand;
    AtsRandState state;

    long diff_binary, diff_binary_inv;
    
    int l,i,j;

    IplImage *imBinary, *imBinary_inv, *imTo_zero, *imTo_zero_inv, *imInput, *imOutput;
    CvSize size;

    int code = TRS_OK;

//  read tests params 
    if(!trsiRead( &width, "128", "image width" ))
        return TRS_UNDEF;
    if(!trsiRead( &height, "128", "image height" ))
        return TRS_UNDEF;

//  initialized image
    l = width*height*sizeof(uchar);

    cp = (CvPoint*) trsmAlloc(nPoints2*sizeof(CvPoint));

    xc = (int)( width/2.);
    yc = (int)( height/2.);

    kp = nPoints2;

    size.width = width;
    size.height = height;

    int xmin = width;
    int ymin = height;
    int xmax = 0;
    int ymax = 0;
    
    
    // Generate the vertices of a rotated ellipse-like polygon centered at
    // (xc, yc), tracking its bounding box as we go.
    for(i=0;i<nPoints2;i++)
    {
        cp[i].x = (int)(a2*cos(2*pi*i/nPoints2)*cos(2*pi*fi/360.))-
        (int)(b2*sin(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+xc;
        if(xmin> cp[i].x) xmin = cp[i].x;
        if(xmax< cp[i].x) xmax = cp[i].x;
        cp[i].y = (int)(a2*cos(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+
                    (int)(b2*sin(2*pi*i/nPoints2)*cos(2*pi*fi/360.))+yc;
        if(ymin> cp[i].y) ymin = cp[i].y;
        if(ymax< cp[i].y) ymax = cp[i].y;
    }

    // The polygon must lie fully inside the image.
    if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;
    
//  IPL image moment calculation  
//  create image  
    imBinary = cvCreateImage( size, 8, 1 );
    imBinary_inv = cvCreateImage( size, 8, 1 );
    imTo_zero = cvCreateImage( size, 8, 1 );
    imTo_zero_inv = cvCreateImage( size, 8, 1 );
    imOutput = cvCreateImage( size, 8, 1 );
    imInput = cvCreateImage( size, 8, 1 );

    int bgrn = 50;    // background gray level
    int signal = 150; // polygon (foreground) gray level
    
    memset(imInput->imageData,bgrn,l);

    cvFillPoly(imInput, &cp, &kp, 1, cvScalarAll(signal));

//  do noise   
    upper = 22;
    lower = -upper;
    seed = 345753;
    atsRandInit( &state, lower, upper, seed );
    
    uchar *input = (uchar*)imInput->imageData;
    uchar *binary = (uchar*)imBinary->imageData;
    uchar *binary_inv = (uchar*)imBinary_inv->imageData;
    uchar *to_zero = (uchar*)imTo_zero->imageData;
    uchar *to_zero_inv = (uchar*)imTo_zero_inv->imageData;
    // NOTE(review): `parameter` is allocated and freed but never used.
    double *parameter = (double*)trsmAlloc(2*sizeof(double));

    int step = imInput->widthStep;

    // Walk the images row by row: add uniform noise to the input and build
    // the four reference images from the known clean pixel classes.
    for(i = 0; i<size.height; i++, input+=step, binary+=step, binary_inv+=step, to_zero+=step,to_zero_inv+=step)
    {
         for(j = 0; j<size.width; j++)
         {
                atsbRand8s( &state, &rand, 1);   
                if(input[j] == bgrn) 
                {
                    binary[j] = to_zero[j] = (uchar)0;
                    binary_inv[j] = (uchar)255;
                    to_zero_inv[j] = input [j] = (uchar)(bgrn + rand);
                }
                else 
                {
                    binary[j] = (uchar)255;
                    binary_inv[j] = to_zero_inv[j] = (uchar)0;
                    to_zero[j] = input[j] = (uchar)(signal + rand);
                }
        
         }
    }



    // Run the adaptive threshold both ways and count mismatching pixels
    // against the corresponding reference (per-pixel tolerance of 5).
    cvAdaptiveThreshold( imInput, imOutput, (double)255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, parameter1, parameter2 ); 
    diff_binary = atsCompare1Db( (uchar*)imOutput->imageData, (uchar*)imBinary->imageData, l, 5);

    cvAdaptiveThreshold( imInput, imOutput, (double)255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, parameter1, parameter2 ); 
    diff_binary_inv = atsCompare1Db( (uchar*)imOutput->imageData, (uchar*)imBinary_inv->imageData, l, 5);

    if( diff_binary > 5 || diff_binary_inv > 5 )
        code = TRS_FAIL;  
    
    cvReleaseImage(&imInput);
    cvReleaseImage(&imOutput);
    cvReleaseImage(&imBinary);
    cvReleaseImage(&imBinary_inv);
    cvReleaseImage(&imTo_zero);
    cvReleaseImage(&imTo_zero_inv);

    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "diff_binary =%ld \n", diff_binary); 
    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "diff_binary_inv =%ld \n", diff_binary_inv); 

    trsFree(parameter);
    trsFree(cp);
    return code;
}
Exemple #19
0
// Interactive adaptive-threshold + connected-components demo.
// Input comes from an image file, a video file, or an attached camera
// (argv[1] optional).  Each frame is grayscaled, adaptively thresholded
// with GUI-trackbar-controlled parameters, its contours are found and
// drawn in random colors, and the three stages are shown in windows until
// the user presses 'x'.
int main( int argc, char** argv )
{

  IplImage* img;  			 		// input image object 
  IplImage* grayImg = NULL;  		// tmp image object
  IplImage* thresholdedImg = NULL;  // threshold output image object
  IplImage* dst;					// output connected components
	
  int windowSize = 3; // starting threshold value
  int constant = 0; // starting constant value
  CvCapture* capture = NULL; // capture object
	
  const char* windowName1 = "OPENCV: adaptive image thresholding"; // window name
  const char* windowName2 = "OPENCV: grayscale image"; // window name
  const char* windowName3 = "OPENCV: adaptive threshold image"; // window name

	
  bool keepProcessing = true;	// loop control flag
  char key;						// user input	
  int  EVENT_LOOP_DELAY = 40;	// delay for GUI window
                                // 40 ms equates to 1000ms/25fps = 40ms per frame	
	
	
	
  // if command line arguments are provided try to read image/video_name
  // otherwise default to capture from attached H/W camera 

    if( 
	  ( argc == 2 && (img = cvLoadImage( argv[1], 1)) != 0 ) ||
	  ( argc == 2 && (capture = cvCreateFileCapture( argv[1] )) != 0 ) || 
	  ( argc != 2 && (capture = cvCreateCameraCapture( CAMERA_INDEX )) != 0 )
	  )
    {
      // create window objects

      cvNamedWindow(windowName1, 0 );
      cvNamedWindow(windowName2, 0 );
      cvNamedWindow(windowName3, 0 );		

	  // add adjustable trackbar for threshold parameter
		
      cvCreateTrackbar("Neighbourhood (N)", windowName3, &windowSize, 255, NULL);		
	  cvCreateTrackbar("Constant (C)", windowName3, &constant, 50, NULL);
		
	  // if capture object in use (i.e. video/camera)
	  // get initial image from capture object
			
	  if (capture) {
	
		  // cvQueryFrame s just a combination of cvGrabFrame 
		  // and cvRetrieveFrame in one call.
			  
		  img = cvQueryFrame(capture);
		  if(!img){
			if (argc == 2){				  
				printf("End of video file reached\n");
			} else {
				printf("ERROR: cannot get next fram from camera\n");
			}
			exit(0);
		  }
			  
	  }
	  
	  // create output image
	  
	  thresholdedImg = cvCreateImage(cvSize(img->width,img->height), 
						   img->depth, 1);
	  thresholdedImg->origin = img->origin;
	  grayImg = cvCreateImage(cvSize(img->width,img->height), 
				img->depth, 1);
	  grayImg->origin = img->origin;

	  dst = cvCloneImage(img);
	  
	  // create a set of random labels
	  // NOTE(review): color_tab is never released (cvReleaseMat) -- one-shot
	  // leak at program exit.
	  	  
	  CvRNG rng = cvRNG(-1);
	  CvMat* color_tab = cvCreateMat( 1, 255, CV_8UC3 );
            for(int i = 0; i < 255; i++ )
            {
                uchar* ptr = color_tab->data.ptr + i*3;
                ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
            }
	  // NOTE(review): storage is never released (cvReleaseMemStorage) --
	  // contour memory accumulates for the lifetime of the program.
	  CvMemStorage* storage = cvCreateMemStorage(0);
			
      CvSeq* contours = 0;
	  CvSeq* current_contour;
      int comp_count = 0;
	  
	  // start main loop	
		
	  while (keepProcessing) {
		
		  // if capture object in use (i.e. video/camera)
		  // get image from capture object
			
		  if (capture) {
	
			  // cvQueryFrame s just a combination of cvGrabFrame 
			  // and cvRetrieveFrame in one call.
			  
			  img = cvQueryFrame(capture);
			  if(!img){
				if (argc == 2){				  
					printf("End of video file reached\n");
				} else {
					printf("ERROR: cannot get next fram from camera\n");
				}
				exit(0);
			  }
			  
		  }	  
		  
		  // if input is not already grayscale, convert to grayscale
		
		  if (img->nChannels > 1){
			 cvCvtColor(img, grayImg, CV_BGR2GRAY);
	      } else {
			// NOTE(review): aliasing grayImg to img leaks the image that
			// grayImg previously pointed at, and the cvReleaseImage calls at
			// the end can then release the same image twice -- verify on the
			// single-channel input path.
			grayImg = img;
		  }
	  
		  // display image in window

		  cvShowImage( windowName2, grayImg );
	
		  // check that the window size is always odd and > 3
		  // (the trackbar can set any value 0..255, so repair it each frame)

		  if ((windowSize > 3) && (fmod((double) windowSize, 2) == 0)) {
				windowSize++;
		  } else if (windowSize < 3) {
			  windowSize = 3;
		  }
		  
		  // threshold the image and display
		  
		  cvAdaptiveThreshold(grayImg, thresholdedImg, 255,
		  						CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 
		  						windowSize, constant);
		  cvShowImage( windowName3, thresholdedImg );
		  
		 // find the contours (destructively, on the thresholded image)
		  
		  cvFindContours( thresholdedImg, storage, 
		  		&contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		  
		  // if (contours) (cvSeqSort(contours, sort_contour, NULL));
		  
		  // draw the contours in the output image
			
		    cvZero(dst);
			current_contour = contours;
			comp_count = 0;
            for( ; current_contour != 0; current_contour = current_contour->h_next, comp_count++ )
            {
				uchar* ptr = color_tab->data.ptr + (comp_count)*3;
				CvScalar color = CV_RGB( ptr[0], ptr[1], ptr[2] );
                cvDrawContours( dst, current_contour, color,
                                color, -1, CV_FILLED, 8, cvPoint(0,0) );
            }
			if (contours != NULL){
				cvClearSeq(contours);
			}

		  
		  // display images in window

		  cvShowImage( windowName1, dst );
			
		  // start event processing loop (very important,in fact essential for GUI)
	      // 40 ms roughly equates to 1000ms/25fps = 40ms per frame
		  
		  key = cvWaitKey(EVENT_LOOP_DELAY);

		  if (key == 'x'){
			
	   		// if user presses "x" then exit
			
	   			printf("Keyboard exit requested : exiting now - bye!\n");	
	   			keepProcessing = false;
		  } 
	  }
      
      // destroy window objects
      // (triggered by event loop *only* window is closed)

      cvDestroyAllWindows();

      // destroy image object (if it does not originate from a capture object)

      if (!capture){
		  cvReleaseImage( &img );
      }
	  
	  // destroy image objects

      cvReleaseImage( &grayImg );
	  cvReleaseImage( &thresholdedImg );
	  cvReleaseImage( &dst );

      // all OK : main returns 0

      return 0;
    }

    // not OK : main returns -1

    return -1;
}
/*
 * Decide whether the cat profile in `img` contains "prey" (e.g. a mouse)
 * by combining the caller's global threshold with a local adaptive one
 * and counting the contours that survive morphology.
 *
 * ctx         - matcher context; supplies contour storage (ctx->storage)
 *               and the morphology kernels (ctx->kernel2x2, ctx->kernel3x3).
 * img         - single-channel source image (all work images below are
 *               created with depth 8, 1 channel).
 * inv_thr_img - inverted, globally thresholded version of img containing
 *               the rough cat profile (produced by the caller).
 * result      - match result; result->match_rects[0] is drawn in the debug
 *               image, so it is assumed to be populated by an earlier Haar
 *               detection step -- TODO confirm against callers.
 * save_steps  - when non-zero, every intermediate image is saved via
 *               catcierge_haar_matcher_save_step_image().
 *
 * Returns 1 when more than one contour remains (prey detected), else 0.
 */
int catcierge_haar_matcher_find_prey_adaptive(catcierge_haar_matcher_t *ctx,
											IplImage *img, IplImage *inv_thr_img,
											match_result_t *result, int save_steps)
{
	IplImage *inv_adpthr_img = NULL;
	IplImage *inv_combined = NULL;
	IplImage *open_combined = NULL;
	IplImage *dilate_combined = NULL;
	CvSeq *contours = NULL;
	size_t contour_count = 0;
	CvSize img_size;
	assert(ctx);
	assert(img);
	assert(ctx->args);

	img_size = cvGetSize(img);

	// We expect to be given an inverted global thresholded image (inv_thr_img)
	// that contains the rough cat profile.

	// Do an inverted adaptive threshold of the original image as well.
	// This brings out small details such as a mouse tail that fades
	// into the background during a global threshold.
	// (Gaussian-weighted 11x11 neighborhood, offset 5.)
	inv_adpthr_img = cvCreateImage(img_size, 8, 1);
	cvAdaptiveThreshold(img, inv_adpthr_img, 255,
		CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 11, 5);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_adpthr_img, result, "adp_thresh", "Inverted adaptive threshold", save_steps);

	// Now we can combine the two thresholded images into one.
	// (Saturating add: a pixel is set if it is set in either input.)
	inv_combined = cvCreateImage(img_size, 8, 1);
	cvAdd(inv_thr_img, inv_adpthr_img, inv_combined, NULL);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_combined, result, "inv_combined", "Combined global and adaptive threshold", save_steps);

	// Get rid of noise from the adaptive threshold.
	// (Morphological open, 2 iterations of the 2x2 kernel.)
	open_combined = cvCreateImage(img_size, 8, 1);
	cvMorphologyEx(inv_combined, open_combined, NULL, ctx->kernel2x2, CV_MOP_OPEN, 2);
	catcierge_haar_matcher_save_step_image(ctx,
		open_combined, result, "opened", "Opened image", save_steps);

	// Grow the remaining blobs so nearby fragments merge (3 iterations).
	dilate_combined = cvCreateImage(img_size, 8, 1);
	cvDilate(open_combined, dilate_combined, ctx->kernel3x3, 3);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "dilated", "Dilated image", save_steps);

	// Invert back the result so the background is white again.
	cvNot(dilate_combined, dilate_combined);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "combined", "Combined binary image", save_steps);

	// NOTE: cvFindContours modifies dilate_combined in place.
	cvFindContours(dilate_combined, ctx->storage, &contours,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	// If we get more than 1 contour we count it as a prey.
	contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

	if (save_steps)
	{
		IplImage *img_contour = cvCloneImage(img);
		IplImage *img_final_color = NULL;
		CvScalar color;

		// Outline the contours on a copy of the source for inspection.
		cvDrawContours(img_contour, contours, cvScalarAll(255), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
		catcierge_haar_matcher_save_step_image(ctx,
			img_contour, result, "contours", "Background contours", save_steps);

		// Draw a final color combined image with the Haar detection + contour.
		cvResetImageROI(img_contour);

		img_final_color =  cvCreateImage(cvGetSize(img_contour), 8, 3);

		// Red rectangle = prey detected, green = no prey.
		cvCvtColor(img_contour, img_final_color, CV_GRAY2BGR);
		color = (contour_count > 1) ? CV_RGB(255, 0, 0) : CV_RGB(0, 255, 0);
		cvRectangleR(img_final_color, result->match_rects[0], color, 2, 8, 0);

		catcierge_haar_matcher_save_step_image(ctx,
			img_final_color, result, "final", "Final image", save_steps);

		cvReleaseImage(&img_contour);
		cvReleaseImage(&img_final_color);
	}

	cvReleaseImage(&inv_adpthr_img);
	cvReleaseImage(&inv_combined);
	cvReleaseImage(&open_combined);
	cvReleaseImage(&dilate_combined);

	return (contour_count > 1);
}
/*
 * Threshold demo. Loads an image (path in argv[1], or a default test image),
 * converts it to grayscale and compares a global cvThreshold against
 * cvAdaptiveThreshold. It then thresholds an intensity image built by
 * averaging the R/G/B planes, and finally thresholds each RGB plane
 * separately and re-merges them into a color image. Every intermediate
 * result is shown in its own window until a key is pressed.
 *
 * Returns 0 on success, -1 if the image could not be loaded.
 */
int main(int argc, char* argv[])
{
        IplImage *image=0, *gray=0, *dst=0, *dst2=0;
        IplImage* r=0, *g=0, *b=0; // individual planes of the RGB image

        // the image name is passed as the first argument
        const char* filename = argc >= 2 ? argv[1] : "../tests/rectangle_5_1_6.jpg";
        // load the image
        image = cvLoadImage(filename, 1);

        printf("[i] image: %s\n", filename);
        // Fail gracefully instead of asserting: assert() is compiled out in
        // NDEBUG builds, and the code below would then crash on NULL.
        if( image == 0 ){
                printf("[i] failed to load image: %s\n", filename);
                return -1;
        }

        // show the original image
        cvNamedWindow( "original", 1 );
        cvShowImage( "original", image );

        // image that will hold the grayscale version
        gray = cvCreateImage(cvGetSize(image), image->depth, 1);

        // Convert to grayscale. BUGFIX: the original code called
        // cvConvertImage(image, gray, CV_BGR2GRAY). cvConvertImage's third
        // argument is a CV_CVTIMG_* flags bitmask, not a color-conversion
        // code; CV_BGR2GRAY (== 6) was silently read as CV_CVTIMG_SWAP_RB,
        // applying the luminance weights to swapped channels.
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // show the gray image
        cvNamedWindow( "gray", 1 );
        cvShowImage( "gray", gray );

        dst = cvCreateImage( cvGetSize(gray), IPL_DEPTH_8U, 1);
        dst2 = cvCreateImage( cvGetSize(gray), IPL_DEPTH_8U, 1);

        // global vs. adaptive threshold of the grayscale image
        cvThreshold(gray, dst, 50, 250, CV_THRESH_BINARY);
        cvAdaptiveThreshold(gray, dst2, 250, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 1);

        // show the results
        cvNamedWindow( "cvThreshold", 1 );
        cvShowImage( "cvThreshold", dst);
        cvNamedWindow( "cvAdaptiveThreshold", 1 );
        cvShowImage( "cvAdaptiveThreshold", dst2);

        //=======================================================
        //
        // threshold an intensity image built from the RGB planes
        //

        r = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        g = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        b = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);

        // split into separate planes (IplImage stores BGR order)
        cvSplit(image, b, g, r, 0);

        // scratch image for the intermediate average
        IplImage* temp = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); 

        // Average the planes with truly equal weights:
        //   temp = (r + g) / 2, then temp = temp*2/3 + b/3 = (r + g + b) / 3.
        // BUGFIX: the original used weights 1/3,1/3 in the first step, which
        // yields 2r/9 + 2g/9 + b/3 despite the "equal weight" intent.
        cvAddWeighted( r, 1.0/2.0, g, 1.0/2.0, 0.0, temp );
        cvAddWeighted( temp, 2.0/3.0, b, 1.0/3.0, 0.0, temp );

        // threshold the averaged image
        cvThreshold(temp, dst, 50, 250, CV_THRESH_BINARY);

        cvReleaseImage(&temp);

        // show the result
        cvNamedWindow( "RGB cvThreshold", 1 );
        cvShowImage( "RGB cvThreshold", dst);

        //
        // now threshold each plane separately
        //

        IplImage* t1, *t2, *t3; // per-plane intermediate storage
        t1 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        t2 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
        t3 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);

        // threshold every plane
        cvThreshold(r, t1, 50, 250, CV_THRESH_BINARY);
        cvThreshold(g, t2, 50, 250, CV_THRESH_BINARY);
        cvThreshold(b, t3, 50, 250, CV_THRESH_BINARY);

        // merge the thresholded planes back into a color image
        cvMerge(t3, t2, t1, 0, image);

        cvNamedWindow( "RGB cvThreshold 2", 1 );
        cvShowImage( "RGB cvThreshold 2", image);

        cvReleaseImage(&t1); cvReleaseImage(&t2); cvReleaseImage(&t3);

        //=======================================================

        // wait for a key press
        cvWaitKey(0);

        // free resources
        cvReleaseImage(&image);
        cvReleaseImage(&gray);
        cvReleaseImage(&dst);
        cvReleaseImage(&dst2);

        cvReleaseImage(&r); cvReleaseImage(&g); cvReleaseImage(&b);

        // close the windows
        cvDestroyAllWindows();
        return 0;
}
/*
 * Run one iteration of grid-cell motion detection on p_frame.
 *
 * The frame is half-sized (cvPyrDown), converted to gray, and differenced
 * against the previous processed frame; the difference is binarized with an
 * adaptive threshold and cleaned with dilate/erode. Cells of the
 * p_gridx x p_gridy grid containing motion are collected in m_MotionCells,
 * optionally drawn onto p_frame (outlined, or alpha-blended via
 * `transparencyimg` when p_useAlpha), and optionally logged to a data file.
 *
 * Frames may be skipped depending on p_framerate (see the ternary chain
 * below); on a skipped frame the cells from the last processed frame are
 * re-drawn and -2 is returned.
 *
 * Returns 0 on success, -2 when the frame was dropped, or a non-zero error
 * from initDataFile()/saveMotionCells().
 *
 * NOTE(review): the early `return ret` paths (initDataFile and
 * saveMotionCells failures) skip the cvReleaseImage cleanup at the bottom
 * of the processed branch -- possible image leak; confirm.
 */
int
MotionCells::performDetectionMotionCells (IplImage * p_frame,
    double p_sensitivity, double p_framerate, int p_gridx, int p_gridy,
    gint64 timestamp_millisec, bool p_isVisible, bool p_useAlpha,
    int motionmaskcoord_count, motionmaskcoordrect * motionmaskcoords,
    int motionmaskcells_count, motioncellidx * motionmaskcellsidx,
    cellscolor motioncellscolor, int motioncells_count,
    motioncellidx * motioncellsidx, gint64 starttime, char *p_datafile,
    bool p_changed_datafile, int p_thickness)
{

  /* Map the frame rate to a number of frames to accumulate before
     processing one: <=5 fps -> 1 (every frame), <=10 -> 2, ... <=30 -> 6.
     Rates below 1 or above 30 leave sumframecnt at 0 (process always). */
  int sumframecnt = 0;
  int ret = 0;
  p_framerate >= 1 ? p_framerate <= 5 ? sumframecnt = 1
      : p_framerate <= 10 ? sumframecnt = 2
      : p_framerate <= 15 ? sumframecnt = 3
      : p_framerate <= 20 ? sumframecnt = 4
      : p_framerate <= 25 ? sumframecnt = 5
      : p_framerate <= 30 ? sumframecnt = 6 : sumframecnt = 0 : sumframecnt = 0;

  m_framecnt++;
  m_changed_datafile = p_changed_datafile;
  if (m_framecnt >= sumframecnt) {
    /* Enough frames accumulated: process this one. */
    m_useAlpha = p_useAlpha;
    m_gridx = p_gridx;
    m_gridy = p_gridy;
    if (m_changed_datafile) {
      /* (Re)open the data file when its path changed. */
      ret = initDataFile (p_datafile, starttime);
      if (ret != 0)
        return ret;
    }

    /* All analysis happens at half resolution. */
    m_frameSize = cvGetSize (p_frame);
    m_frameSize.width /= 2;
    m_frameSize.height /= 2;
    setMotionCells (m_frameSize.width, m_frameSize.height);
    /* Caller passes sensitivity as "higher = more sensitive"; internally a
       lower m_sensitivity threshold means more motion reported. */
    m_sensitivity = 1 - p_sensitivity;
    m_isVisible = p_isVisible;
    m_pcurFrame = cvCloneImage (p_frame);
    /* Despite the m_ prefix these are locals, released at the end of this
       branch; they are not the members of the same name. */
    IplImage *m_pcurgreyImage = cvCreateImage (m_frameSize, IPL_DEPTH_8U, 1);
    IplImage *m_pprevgreyImage = cvCreateImage (m_frameSize, IPL_DEPTH_8U, 1);
    IplImage *m_pgreyImage = cvCreateImage (m_frameSize, IPL_DEPTH_8U, 1);
    IplImage *m_pcurDown =
        cvCreateImage (m_frameSize, m_pcurFrame->depth, m_pcurFrame->nChannels);
    IplImage *m_pprevDown = cvCreateImage (m_frameSize, m_pprevFrame->depth,
        m_pprevFrame->nChannels);
    m_pbwImage = cvCreateImage (m_frameSize, IPL_DEPTH_8U, 1);
    /* Downsample previous and current frames, then grayscale them. */
    cvPyrDown (m_pprevFrame, m_pprevDown);
    cvCvtColor (m_pprevDown, m_pprevgreyImage, CV_RGB2GRAY);
    cvPyrDown (m_pcurFrame, m_pcurDown);
    cvCvtColor (m_pcurDown, m_pcurgreyImage, CV_RGB2GRAY);
    m_pdifferenceImage = cvCloneImage (m_pcurgreyImage);
    //cvSmooth(m_pcurgreyImage, m_pcurgreyImage, CV_GAUSSIAN, 3, 0);//TODO camera noise reduce,something smoothing, and rethink runningavg weights

    //Minus the current gray frame from the 8U moving average.
    cvAbsDiff (m_pprevgreyImage, m_pcurgreyImage, m_pdifferenceImage);

    //Convert the image to black and white.
    /* Gaussian adaptive threshold, 7x7 block; param1 is left at its
       default (5). Inverted so moving pixels become white. */
    cvAdaptiveThreshold (m_pdifferenceImage, m_pbwImage, 255,
        CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 7);

    // Dilate and erode to get object blobs
    cvDilate (m_pbwImage, m_pbwImage, NULL, 2);
    cvErode (m_pbwImage, m_pbwImage, NULL, 2);

    //mask-out the overlay on difference image
    if (motionmaskcoord_count > 0)
      performMotionMaskCoords (motionmaskcoords, motionmaskcoord_count);
    if (motionmaskcells_count > 0)
      performMotionMask (motionmaskcellsidx, motionmaskcells_count);
    if (getIsNonZero (m_pbwImage)) {    //detect Motion
      if (m_MotionCells.size () > 0)    //it contains previous motioncells what we used when frames dropped
        m_MotionCells.clear ();
      if (transparencyimg)
        cvReleaseImage (&transparencyimg);
      /* Fill m_MotionCells, restricted to the caller's cell list when one
         was supplied. */
      (motioncells_count > 0) ?
          calculateMotionPercentInMotionCells (motioncellsidx,
          motioncells_count)
          : calculateMotionPercentInMotionCells (motionmaskcellsidx, 0);

      transparencyimg = cvCreateImage (cvGetSize (p_frame), p_frame->depth, 3);
      cvSetZero (transparencyimg);
      if (m_motioncellsidxcstr)
        delete[]m_motioncellsidxcstr;
      m_motioncells_idx_count = m_MotionCells.size () * MSGLEN; //one motion cell idx: (lin idx : col idx,) it's 4 character except last motion cell idx
      m_motioncellsidxcstr = new char[m_motioncells_idx_count];
      char *tmpstr = new char[MSGLEN];
      for (int i = 0; i < MSGLEN; i++)
        tmpstr[i] = ' ';
      /* Draw each motion cell (scaled back up by 2 to full resolution)
         and build the "line:col,line:col" index string. */
      for (unsigned int i = 0; i < m_MotionCells.size (); i++) {
        CvPoint pt1, pt2;
        pt1.x = m_MotionCells.at (i).cell_pt1.x * 2;
        pt1.y = m_MotionCells.at (i).cell_pt1.y * 2;
        pt2.x = m_MotionCells.at (i).cell_pt2.x * 2;
        pt2.y = m_MotionCells.at (i).cell_pt2.y * 2;
        if (m_useAlpha && m_isVisible) {
          /* Filled rectangle on the overlay; blended into p_frame below. */
          cvRectangle (transparencyimg,
              pt1,
              pt2,
              CV_RGB (motioncellscolor.B_channel_value,
                  motioncellscolor.G_channel_value,
                  motioncellscolor.R_channel_value), CV_FILLED);
        } else if (m_isVisible) {
          /* Outline drawn directly on the frame. */
          cvRectangle (p_frame,
              pt1,
              pt2,
              CV_RGB (motioncellscolor.B_channel_value,
                  motioncellscolor.G_channel_value,
                  motioncellscolor.R_channel_value), p_thickness);
        }

        if (i < m_MotionCells.size () - 1) {
          snprintf (tmpstr, MSGLEN, "%d:%d,", m_MotionCells.at (i).lineidx,
              m_MotionCells.at (i).colidx);
        } else {
          snprintf (tmpstr, MSGLEN, "%d:%d", m_MotionCells.at (i).lineidx,
              m_MotionCells.at (i).colidx);
        }
        if (i == 0)
          strncpy (m_motioncellsidxcstr, tmpstr, m_motioncells_idx_count);
        else
          strcat (m_motioncellsidxcstr, tmpstr);
      }
      if (m_MotionCells.size () == 0)
        strncpy (m_motioncellsidxcstr, " ", m_motioncells_idx_count);

      if (m_useAlpha && m_isVisible) {
        if (m_MotionCells.size () > 0)
          blendImages (p_frame, transparencyimg, m_alpha, m_beta);
      }

      delete[]tmpstr;

      if (mc_savefile && m_saveInDatafile) {
        ret = saveMotionCells (timestamp_millisec);
        if (ret != 0)
          return ret;
      }
    } else {
      /* No motion at all this frame. */
      m_motioncells_idx_count = 0;
      if (m_MotionCells.size () > 0)
        m_MotionCells.clear ();
      if (transparencyimg)
        cvReleaseImage (&transparencyimg);
    }

    /* Current frame becomes the reference for the next iteration. */
    if (m_pprevFrame)
      cvReleaseImage (&m_pprevFrame);
    m_pprevFrame = cvCloneImage (m_pcurFrame);
    m_framecnt = 0;
    if (m_pcurFrame)
      cvReleaseImage (&m_pcurFrame);
    if (m_pdifferenceImage)
      cvReleaseImage (&m_pdifferenceImage);
    if (m_pcurgreyImage)
      cvReleaseImage (&m_pcurgreyImage);
    if (m_pprevgreyImage)
      cvReleaseImage (&m_pprevgreyImage);
    if (m_pgreyImage)
      cvReleaseImage (&m_pgreyImage);
    if (m_pbwImage)
      cvReleaseImage (&m_pbwImage);
    if (m_pprevDown)
      cvReleaseImage (&m_pprevDown);
    if (m_pcurDown)
      cvReleaseImage (&m_pcurDown);
    if (m_pCells) {
      for (int i = 0; i < m_gridy; ++i) {
        delete[]m_pCells[i];
      }
      delete[]m_pCells;
    }

    if (p_framerate <= 5) {
      /* At very low rates no frames are dropped, so the re-draw cache is
         never needed; free it. */
      if (m_MotionCells.size () > 0)
        m_MotionCells.clear ();
      if (transparencyimg)
        cvReleaseImage (&transparencyimg);
    }
  } else {                      //we do frame drop
    /* Skipped frame: re-draw the cells found on the last processed frame
       so the visualization stays continuous. */
    m_motioncells_idx_count = 0;
    ret = -2;
    for (unsigned int i = 0; i < m_MotionCells.size (); i++) {
      CvPoint pt1, pt2;
      pt1.x = m_MotionCells.at (i).cell_pt1.x * 2;
      pt1.y = m_MotionCells.at (i).cell_pt1.y * 2;
      pt2.x = m_MotionCells.at (i).cell_pt2.x * 2;
      pt2.y = m_MotionCells.at (i).cell_pt2.y * 2;
      if (m_useAlpha && m_isVisible) {
        cvRectangle (transparencyimg,
            pt1,
            pt2,
            CV_RGB (motioncellscolor.B_channel_value,
                motioncellscolor.G_channel_value,
                motioncellscolor.R_channel_value), CV_FILLED);
      } else if (m_isVisible) {
        cvRectangle (p_frame,
            pt1,
            pt2,
            CV_RGB (motioncellscolor.B_channel_value,
                motioncellscolor.G_channel_value,
                motioncellscolor.R_channel_value), p_thickness);
      }

    }
    if (m_useAlpha && m_isVisible) {
      if (m_MotionCells.size () > 0)
        blendImages (p_frame, transparencyimg, m_alpha, m_beta);
    }
  }
  return ret;
}
Exemple #23
0
//--------------------------------------------------------------
void testApp::update(){
	

    bool bNewFrame = false;

	#ifdef _USE_LIVE_VIDEO
       vidGrabber.grabFrame();
	   bNewFrame = vidGrabber.isFrameNew();
    #else
        vidPlayer.idleMovie();
        bNewFrame = vidPlayer.isFrameNew();
	#endif

	if (bNewFrame){

		#ifdef _USE_LIVE_VIDEO
            colorImg.setFromPixels(vidGrabber.getPixels(), cw,ch);
	    #else
            colorImg.setFromPixels(vidPlayer.getPixels(), cw,ch);
        #endif

		
		kx = (float) ofGetWidth()  / cw;
		ky = (float) ofGetHeight() / ch;
		
		cvSmooth(colorImg.getCvImage(), medianImg.getCvImage(), CV_MEDIAN, medianValue, medianValue);
		medianImg.flagImageChanged();
				
		grayImage = medianImg;
		
		cvCvtColor(colorImg.getCvImage(), hsvImage.getCvImage(), CV_RGB2HSV);
		hsvImage.flagImageChanged();
		
		cvSetImageCOI(hsvImage.getCvImage(), 2);
		cvCopy(hsvImage.getCvImage(), satImage.getCvImage());
		satImage.flagImageChanged();
		cvSetImageCOI(hsvImage.getCvImage(), 0);
		
		//cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_BLUR, 3, 3, 0, 0);
		
		cvAdaptiveThreshold(grayImage.getCvImage(), trsImage.getCvImage(), 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, adaptiveThreshValue);
		//cvCanny(trsImage.getCvImage(), trsImage.getCvImage(), sb, sb*4, 3);
		trsImage.flagImageChanged();
		
		
//		cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_MEDIAN, 7, 7);
		
//		cvSmooth( iplImage, iplImage, CV_BLUR, br, br, 0, 0 );
//		cvSmooth( iplImage, iplImage, CV_MEDIAN, 7, 7);
		cvCanny(  grayImage.getCvImage(), cannyImage.getCvImage(), cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
		cannyImage.flagImageChanged();
			
		//cvPyrMeanShiftFiltering(colorImg.getCvImage(), colorImg.getCvImage(), 20, 40, 2);
		
		if (mode==MODE_DRAWING) {

			if (draw_edges) {

				#if PROBABILISTIC_LINE
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, lineThreshValue, lineMinLengthValue, lineMaxGapValue );
				#else
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
				#endif
			
			}
			
			if (draw_contours || draw_approx) {
				cvFindContours(cannyImage.getCvImage(), edgesStorage, &edgeContours);
				
				CvSeq* contour = edgeContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
				
			}

			if (draw_fills) {
				cvFindContours(trsImage.getCvImage(), fillsStorage, &fillContours);
				
				CvSeq* contour = fillContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
			}
		}

	}
	
	
	// update scope
//	float* rand = new float[50];
//	for(int i=0 ;i<50; i++){
//		rand[i] = ofRandom(-1.0,1);
//		
//	}
//	
//	gui->update(scope, kofxGui_Set_FloatArray, rand, sizeof(float*));
//	
//	// make 3 seconds loop
//	float f = ((ofGetElapsedTimeMillis()%3000) / 3000.0);
//	gui->update(points, kofxGui_Set_Float, &f, sizeof(float));

}
Exemple #24
0
/**
 * Locate a Macbeth ColorChecker (or X-Rite Passport) chart in the image
 * file `img`.
 *
 * Pipeline: per-channel adaptive threshold -> OR -> morphological open ->
 * contour search -> quad filtering -> (optional k-means split when the
 * quad count suggests a Passport) -> find_colorchecker() on the candidate
 * set with the lowest error. The found chart is drawn onto the returned
 * image and each patch is printed as "x,y,R,G,B" followed by the chart
 * size and fit error.
 *
 * Returns a newly loaded IplImage with the detection rendered on it
 * (caller releases), or NULL if the file could not be loaded.
 */
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img,
        CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );

    // BUGFIX: bail out before touching the image. The original code
    // dereferenced macbeth_img->width (cvCreateImage sizing below) before
    // its "if( macbeth_img )" check, crashing on a missing/unreadable file.
    if( !macbeth_img ) {
        fprintf(stderr,"Failed to load image: %s\n", img);
        return NULL;
    }

    // Keep an untouched copy; the detection result is drawn on macbeth_img.
    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);

    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];

    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }

    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);

    int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
    int threshold_type = CV_THRESH_BINARY_INV;
    // block size ~2% of the smaller dimension, forced odd (|1) as
    // cvAdaptiveThreshold requires
    int block_size = cvRound(
        MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
    fprintf(stderr,"Using %d as block size\n", block_size);

    double offset = 6;

    // do an adaptive threshold on each channel
    for(int i = 0; i < 3; i++) {
        cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
    }

    IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );

    // OR the binary threshold results together
    cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
    cvOr(macbeth_split_thresh[2],adaptive,adaptive);

    for(int i = 0; i < 3; i++) {
        cvReleaseImage( &(macbeth_split[i]) );
        cvReleaseImage( &(macbeth_split_thresh[i]) );
    }

    int element_size = (block_size/10)+2;
    fprintf(stderr,"Using %d as element size\n", element_size);

    // do an opening on the threshold image to remove noise
    IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
    cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
    cvReleaseStructuringElement(&element);

    CvMemStorage* storage = cvCreateMemStorage(0);

    CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
    CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );

    // find contours in the threshold image
    CvSeq * contours = NULL;
    cvFindContours(adaptive,storage,&contours);

    // minimum patch area: image area spread over the chart squares,
    // with a generous 100x slack factor
    int min_size = (macbeth_img->width*macbeth_img->height)/
        (MACBETH_SQUARES*100);

    if(contours) {
        int count = 0;

        for( CvSeq* c = contours; c != NULL; c = c->h_next) {
            CvRect rect = ((CvContour*)c)->rect;
            // only interested in hole contours of at least the minimum size
            if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                // only interested in quad-like contours
                CvSeq * quad_contour = find_quad(c, storage, min_size);
                if(quad_contour) {
                    cvSeqPush( initial_quads, &quad_contour );
                    count++;

                    CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);

                    CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                    cvSeqPush( initial_boxes, &box );

                    // Classify this quad's average color against the
                    // reference chart (previously used only by now-removed
                    // debug drawing).
                    double min_distance = MAX_RGB_DISTANCE;
                    CvPoint closest_color_idx = cvPoint(-1,-1);
                    for(int y = 0; y < MACBETH_HEIGHT; y++) {
                        for(int x = 0; x < MACBETH_WIDTH; x++) {
                            double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                            if(distance < min_distance) {
                                closest_color_idx.x = x;
                                closest_color_idx.y = y;
                                min_distance = distance;
                            }
                        }
                    }
                    // BUGFIX: the original then read
                    // colorchecker_srgb[closest_color_idx.y][closest_color_idx.x]
                    // unconditionally; with no color within MAX_RGB_DISTANCE
                    // the index stayed (-1,-1) -> out-of-bounds read. The
                    // value was dead (debug-only), so the lookup is removed.
                }
            }
        }

        ColorChecker found_colorchecker;

        fprintf(stderr,"%d initial quads found", initial_quads->total);
        if(count > MACBETH_SQUARES) {
            // More quads than one chart has: likely a Passport (two charts),
            // so split the candidates spatially and test both halves.
            fprintf(stderr," (probably a Passport)\n");

            CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
            CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );

            CvSeq* partitioned_quads[2];
            CvSeq* partitioned_boxes[2];
            for(int i = 0; i < 2; i++) {
                partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
            }

            // set up the points sequence for cvKMeans2, using the box centers
            for(int i = 0; i < initial_quads->total; i++) {
                CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));

                cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
            }

            // partition into two clusters: passport and colorchecker
            cvKMeans2( points, 2, clusters, 
                       cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,
                                       10, 1.0 ) );

            for(int i = 0; i < initial_quads->total; i++) {
                int cluster_idx = clusters->data.i[i];

                cvSeqPush( partitioned_quads[cluster_idx],
                           cvGetSeqElem(initial_quads, i) );
                cvSeqPush( partitioned_boxes[cluster_idx],
                           cvGetSeqElem(initial_boxes, i) );
            }

            ColorChecker partitioned_checkers[2];

            // check each of the two partitioned sets for the best colorchecker
            for(int i = 0; i < 2; i++) {
                partitioned_checkers[i] =
                    find_colorchecker(partitioned_quads[i], partitioned_boxes[i],
                                  storage, macbeth_img, macbeth_original);
            }

            // use the colorchecker with the lowest error
            found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                partitioned_checkers[0] : partitioned_checkers[1];

            cvReleaseMat( &points );
            cvReleaseMat( &clusters );
        }
        else { // just one colorchecker to test
            fprintf(stderr,"\n");
            found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                              storage, macbeth_img, macbeth_original);
        }

        // render the found colorchecker
        draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);

        // print out the colorchecker info, one "x,y,R,G,B" line per patch
        for(int y = 0; y < MACBETH_HEIGHT; y++) {            
            for(int x = 0; x < MACBETH_WIDTH; x++) {
                CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);

                printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                    this_point.val[0],this_point.val[1],
                    this_value.val[2],this_value.val[1],this_value.val[0]);
            }
        }
        printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);

    }

    cvReleaseMemStorage( &storage );

    if( macbeth_original ) cvReleaseImage( &macbeth_original );
    if( adaptive ) cvReleaseImage( &adaptive );

    return macbeth_img;
}
Exemple #25
0
int main(int argc, char** argv)
{
	bool srcbin = 0;
	bool invbk = 0;
	if(argc < 3){
		printf("Not enough args!\narg1: target image\narg2: source image\narg3: do source image adaptive threshold or not\narg4: invert back ground or not\n");
		getchar();
		return 1;
	}
	if(argc >= 4){
		if(!strcmp(argv[3], "1"))
			srcbin = 1;
	}
	if(argc >= 5){
		if(!strcmp(argv[4], "1"))
			invbk = 1;
	}

	IplImage* srcimg= 0, *srcimgb= 0, *srcimgb2 = 0, *bimg = 0, *b2img = 0,*bugimg = 0, *alg2dst = 0;
	srcimg= cvLoadImage(argv[2], 1);
	if (!srcimg)
	{
		printf("src img %s load failed!\n", argv[2]);
		getchar();
		return 1;
	}
	
	//choosing the parameters for our ccl
	int bn = 8; //how many partitions
	int nwidth = 512;
	if(srcimg->width > 512){
		nwidth = 1024;
		bn = 6;
	}
	if(srcimg->width > 1024){
		nwidth = 2048;
		bn = 3;
	}
	if(srcimg->width > 2048){
		printf("warning, image too wide, max support 2048. image is truncated.\n");
		getchar();
		return 1;
	}
	
	//start selection gpu devices
	int devCount;
	int smCnt = 0;
    cudaGetDeviceCount(&devCount);
 
    // Iterate through devices
	int devChosen = 0;
    for (int i = 0; i < devCount; ++i)
    {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
		if(devProp.major >= 2){//only one device supported
			smCnt = max(smCnt, devProp.multiProcessorCount);
			if(devProp.multiProcessorCount == smCnt)
				devChosen = i;
		}
    }
	
	if(smCnt == 0){
		//our ccl require CUDA cap 2.0 or above, but the Ostava's ccl can be run on any CUDA gpu
		printf("Error, no device with cap 2.x found. Only cpu alg will be run.\n");
		getchar();
		return 1;
	}
	
	if(smCnt != 0){
		cudaSetDevice(devChosen);
		bn = bn * smCnt;
	}

	int nheight = (cvGetSize(srcimg).height-2) / (2*bn);
	if((nheight*2*bn+2) < cvGetSize(srcimg).height)
		nheight++;
	nheight = nheight*2*bn+2;

	if(smCnt != 0)
		printf("gpu ccl for image width 512, 1024, 2048.\nchoosing device %d, width %d, height %d, blocks %d\n", devChosen, nwidth, nheight, bn);

	srcimgb= cvCreateImage(cvSize(nwidth, cvGetSize(srcimg).height),IPL_DEPTH_8U,1);
	srcimgb2= cvCreateImage(cvSize(nwidth, cvGetSize(srcimg).height),IPL_DEPTH_8U,1);
	// --- Preprocess: clip both images' ROI to at most nwidth columns, convert the
	// --- (presumably BGRA — confirm srcimg channel layout against the loader) input
	// --- to grayscale into srcimgb2, then drop the original color image.
	cvSetImageROI(srcimg, cvRect(0, 0, min(cvGetSize(srcimg).width, nwidth), cvGetSize(srcimg).height));
	cvSetImageROI(srcimgb2, cvRect(0, 0, min(cvGetSize(srcimg).width, nwidth), cvGetSize(srcimg).height));
	cvSet(srcimgb2, cvScalar(0,0,0));
	cvCvtColor(srcimg, srcimgb2, CV_BGRA2GRAY);
	cvResetImageROI(srcimgb2);
	cvReleaseImage(&srcimg);
	// Binarize into srcimgb with max value 1 (pixels become 0/1, not 0/255),
	// optionally inverted (invbk). srcbin selects adaptive vs. fixed threshold.
	// NOTE(review): the adaptive call relies on cvAdaptiveThreshold's default
	// block_size(3)/param1(5) — confirm those defaults are the intended tuning.
	if(srcbin)
		cvAdaptiveThreshold(srcimgb2, srcimgb, 1.0, CV_ADAPTIVE_THRESH_MEAN_C, invbk ? CV_THRESH_BINARY_INV :  CV_THRESH_BINARY);
	else
		cvThreshold(srcimgb2, srcimgb, 0.0, 1.0, invbk ? CV_THRESH_BINARY_INV :  CV_THRESH_BINARY);
	boundCheck(srcimgb);

	// Scale the 0/1 mask to 0/255 purely for visual inspection; the source binary
	// image to be labeled is saved as bsrc.bmp, then srcimgb2 is cleared for reuse.
	cvScale(srcimgb, srcimgb2, 255);
	//the source binary image to be labeled is saved as bsrc.bmp
	cvSaveImage("bsrc.bmp", srcimgb2);
	cvSet(srcimgb2, cvScalar(0,0,0));
	
	float elapsedMilliSeconds1;
	{//begin cpu labeling algorithm, the SBLA proposed by Zhao
		// SBLA works on a densely packed copy (width, no row padding), so repack the
		// padded IplImage rows (widthStep stride) into a width-strided array of 0/1.
		LABELDATATYPE *data=(LABELDATATYPE *)malloc(srcimgb->width * srcimgb->height * sizeof(LABELDATATYPE));
	
		for(int j = 0; j<srcimgb->height; j++)
			for(int i = 0; i<srcimgb->width; i++)
				data[i + j*srcimgb->width] = (srcimgb->imageData[i + j*srcimgb->widthStep]) ? 1 : 0;

		// Time only the labeling call itself, not the repacking above.
		int iNumLabels;
		CPerformanceCounter perf;
		perf.Start();
    	iNumLabels = LabelSBLA(data, srcimgb->width, srcimgb->height);
		elapsedMilliSeconds1 = (float)perf.GetElapsedMilliSeconds();
		printf("cpu SBLA used %f ms, total labels %u\n", elapsedMilliSeconds1, iNumLabels);
		free(data);
	}
	
	// src2/dst2 and iNumLabels outlive this scope: dst2 (32-bit labels, one per pixel)
	// and iNumLabels are reused below for the GPU-vs-CPU correctness check.
	IplImage *src2(0),*dst2(0);
	int iNumLabels;
	float elapsedMilliSeconds2;
	{//begin cpu labeling algorithm, the BBDT proposed by C. Grana, D. Borghesani, R. Cucchiara
		CPerformanceCounter perf;
		src2 = cvCreateImage( cvGetSize(srcimgb), IPL_DEPTH_8U, 1 );
		cvCopyImage(srcimgb,src2);
		dst2 = cvCreateImage( cvGetSize(srcimgb), IPL_DEPTH_32S, 1 );
		perf.Start();
		cvLabelingImageLab(src2, dst2, 1, &iNumLabels);
		elapsedMilliSeconds2 = (float)perf.GetElapsedMilliSeconds();
		printf("cpu BBDT used %f ms, total labels %u\n", elapsedMilliSeconds2, iNumLabels);
		cvSaveImage("bbdt.bmp", dst2);
//		cvReleaseImage(&src2);
//		cvReleaseImage(&dst2);
	}

	// GPU part runs only when smCnt != 0 (presumably a device/shared-memory
	// capability check computed earlier — confirm against the setup code).
	if(smCnt != 0){

	// Debug/inspection images sized to the per-block buffers (bn blocks).
	bugimg = cvCreateImage(cvSize(nwidth, 9*bn),IPL_DEPTH_8U,1);
	bimg = cvCreateImage(cvSize(nwidth, 2*bn),IPL_DEPTH_8U,1);
	b2img = cvCreateImage(cvSize(nwidth, 2*bn),IPL_DEPTH_8U,1);

//    cvNamedWindow("src",CV_WINDOW_AUTOSIZE);
//	cvShowImage("src",srcimg);

	//prepare buffers for our gpu algorithm
	CudaBuffer srcBuf, dstBuf, dstBuf2, bBuf, b2Buf, errBuf, glabel;
	srcBuf.Create2D(nwidth, nheight);		//the binary image to be processed
	dstBuf.Create2D(nwidth, (nheight-2)/2); //the label result, only about 1/4 the size of source image contains the final labels
	dstBuf2.Create2D(nwidth,(nheight-2)/2);	//a copy of the pass1 temp result, for debug purpose
	glabel.Create2D(4, 1);					//a int size global buffer for unique final label
	errBuf.Create2D(nwidth, 9*bn);			//a buffer for debug info
	bBuf.Create2D(nwidth, 2 * bn);			//the intersection info used by pass2
	b2Buf.Create2D(nwidth, 2 * bn);			//a copy of bBuf for debug purpose

	// Zero-fill first so rows beyond the image height stay empty on the device.
	srcBuf.SetZeroData();
	srcBuf.CopyFrom(srcimgb->imageData, srcimgb->widthStep, nwidth, cvGetSize(srcimgb).height);

	float elapsedTimeInMs = 0.0f;
    
	//-------------------gpu part----------------------------
	// Time the kernel with CUDA events; only widths 512/1024/2048 have kernels,
	// any other width silently runs nothing (elapsed time would still be reported).
    cudaEvent_t start, stop;
    cutilSafeCall  ( cudaEventCreate( &start ) );
    cutilSafeCall  ( cudaEventCreate( &stop ) );
    cutilSafeCall( cudaEventRecord( start, 0 ) );  
	
	if(nwidth == 512)
		label_512(&dstBuf, &dstBuf2, &srcBuf, &bBuf, &b2Buf, &glabel, nheight, bn, &errBuf);
	else if(nwidth == 1024)
		label_1024(&dstBuf, &dstBuf2, &srcBuf, &bBuf, &b2Buf, &glabel, nheight, bn, &errBuf);
	else if(nwidth == 2048)
		label_2048(&dstBuf, &dstBuf2, &srcBuf, &bBuf, &b2Buf, &glabel, nheight, bn, &errBuf);

    cutilSafeCall( cudaEventRecord( stop, 0 ) );
//	cutilCheckMsg("kernel launch failure");
	cudaEventSynchronize(stop);
	cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
	uint tlabel = 0;

	// Fetch the final label count (single 4-byte int) kept in glabel on the device.
	cudaMemcpy(&tlabel, glabel.GetData(), 4, cudaMemcpyDeviceToHost);
	printf("gpu alg 1 used %f ms, total labels %u\n", elapsedTimeInMs, tlabel);

	// Copy device results back into host images for saving/inspection.
	// NOTE(review): the dstBuf2 and b2Buf copies pass srcimgb->widthStep and
	// bimg->widthStep as the pitch for srcimgb2/b2img respectively — correct only
	// if the paired images share identical pitch; confirm, or use each image's own widthStep.
	dstBuf.CopyToHost(srcimgb->imageData, srcimgb->widthStep, nwidth, (nheight-2)/2);
	dstBuf2.CopyToHost(srcimgb2->imageData, srcimgb->widthStep, nwidth, (nheight-2)/2);
	errBuf.CopyToHost(bugimg->imageData, bugimg->widthStep, nwidth, 9*bn);
	bBuf.CopyToHost(bimg->imageData, bimg->widthStep, nwidth, 2*bn);
	b2Buf.CopyToHost(b2img->imageData, bimg->widthStep, nwidth, 2*bn);

//	cvNamedWindow("gpu",CV_WINDOW_AUTOSIZE);
//	cvShowImage("gpu",srcimgb);
	// NOTE(review): this overwrites the input file named by argv[1] with the GPU
	// label image — destructive to the original input; confirm this is intended.
	cvSaveImage(argv[1], srcimgb);
	cvSaveImage("gpu2.bmp", srcimgb2);	//the final labels of our algorithm
	cvSaveImage("bug.bmp", bugimg);
	cvSaveImage("b.bmp", bimg);
	cvSaveImage("b2.bmp", b2img);
	
	//now start the gpu ccl implemented by Ostava
	// alg2dst is 4x wider because Ostava's result buffer holds 4 bytes per pixel.
	alg2dst= cvCreateImage(cvSize(nwidth*4, cvGetSize(srcimgb).height),IPL_DEPTH_8U,1);
	CCLBase* m_ccl;
	m_ccl = new CCL();	

	m_ccl->FindRegions(nwidth, cvGetSize(srcimgb).height, &srcBuf);
	m_ccl->GetConnectedRegionsBuffer()->CopyToHost(alg2dst->imageData, alg2dst->widthStep, nwidth*4, cvGetSize(srcimgb).height);
	delete m_ccl;
	cvSaveImage("alg2.bmp", alg2dst);

	cvReleaseImage(&bugimg);
	cvReleaseImage(&bimg);
	cvReleaseImage(&b2img);
	cvReleaseImage(&alg2dst);
//	}
	//cvWaitKey(0);
	
	//now start cross compare label results of our ccl and the BBDT, to check the correctness
//	if(smCnt != 0){
		// sz is in BYTES: each row of dstBuf holds nwidth bytes = nwidth/2 ushort
		// labels, over height/2 rows — matching the 2x2-block result layout.
		ushort *gpures, *cpures;
		uint sz = nwidth * (cvGetSize(srcimgb).height/2);
		gpures = (ushort*)malloc(sz);
		cpures = (ushort*)malloc(sz);
		dstBuf.CopyToHost(gpures, nwidth, nwidth, (cvGetSize(srcimgb).height/2));
		
		//first, reduce cpu labels from one label for each pixel to one label for a 2x2 block, assuming 8-connectivity
		// NOTE(review): res keeps the LAST nonzero label of the four pixels without
		// checking they agree — valid only if 8-connectivity guarantees a single
		// label per 2x2 block of foreground; a disagreement would go undetected here.
		for(int j = 0; j < (cvGetSize(srcimgb).height/2); j++)
			for(int i = 0; i < (nwidth/2); i++){
				uint* cpup;
				ushort res = LBMAX;
				uint y = j*2, x = i*2;
				// BBDT labels are 1-based 32-bit ints (0 = background); GPU labels
				// are 0-based ushorts, hence the -1 below. LBMAX marks background.
				cpup = (uint*)(dst2->imageData + y*dst2->widthStep);
//				if(y < cvGetSize(srcimgb).height){
					if(cpup[x] != 0)
						res = cpup[x]-1;
					if(cpup[x+1] != 0)
						res = cpup[x+1]-1;
//				}
				y++;
				cpup = (uint*)(dst2->imageData + y*dst2->widthStep);
//				if(y < cvGetSize(srcimgb).height){
					if(cpup[x] != 0)
						res = cpup[x]-1;
					if(cpup[x+1] != 0)
						res = cpup[x+1]-1;
//				}
				cpures[i + j*(nwidth/2)] = res;
			}
		
		//our algo use unsigned short to represent a label, the first label starts a 0, and maximun labels is LBMAX
		if(iNumLabels > LBMAX)
			printf("too much cc, compare abort.\n");
		else{
			//create a error
			//cpures[5] = 12;
			//cpures[15] = 18;
			// Cross-check both directions: each CPU label must map to exactly one
			// GPU label and vice versa (bijection up to renaming).
			printf("Checking correctness of gpu alg1\nChecking gpu ref by cpu.\n");
			checkLabels(cpures, gpures, nwidth/2, cvGetSize(srcimgb).height/2, iNumLabels);

			printf("Checking cpu ref by gpu.\n");
			checkLabels(gpures, cpures, nwidth/2, cvGetSize(srcimgb).height/2, tlabel);
		}

		free(gpures);
		free(cpures);
		// gpu2time is presumably Ostava's GPU time measured elsewhere — confirm it
		// was set before this point, otherwise the first ratio is garbage.
		printf("speedup is %f, %f, %f\n", gpu2time/elapsedTimeInMs, elapsedMilliSeconds1/elapsedTimeInMs, elapsedMilliSeconds2/elapsedTimeInMs);
	}

	// Final cleanup; src2/dst2 were deliberately kept alive for the comparison above.
	cvReleaseImage(&srcimgb);
	cvReleaseImage(&srcimgb2);
	cvReleaseImage(&dst2);
	cvReleaseImage(&src2);

    cutilSafeCall( cudaThreadExit() );
	return 0;

}