Example 1
int main(int argc, char** argv)
{
	// input_morph, tmp8UC1, out_single, storage, first_con and cur are assumed
	// to be declared and initialized elsewhere in the original project.
	int ncell = 0, prev_ncontour = 0, same_count = 0;
	while (true) { // originally: while (!worker->CancellationPending)
		//worker->ReportProgress(50, String::Format(rm->GetString("Progress_Analyze_FoundNCell"), title, ncell));
		cvConvert(input_morph, tmp8UC1);
		cvClearMemStorage(storage);
		int ncontour = cvFindContours(tmp8UC1, storage, &first_con, sizeof(CvContour), CV_RETR_EXTERNAL);
		if (ncontour == 0)
			break; // finished extracting cells
		if (ncontour == prev_ncontour) {
			cvErode(input_morph, input_morph);
			same_count++;
		} else
			same_count = 0;
		prev_ncontour = ncontour;
		cur = first_con;
		while (cur != nullptr) {
			double area = fabs(cvContourArea(cur));
			if ((area < 3000.0) || (same_count > 10)) {
				int npts = cur->total;
				CvPoint *p = new CvPoint[npts];
				cvCvtSeqToArray(cur, p);
				cvFillPoly(out_single, &p, &npts, 1, cvScalar(255.0)); // move to single
				cvFillPoly(input_morph, &p, &npts, 1, cvScalar(0.0)); // remove from input
				delete[] p;
				ncell++;
			}
			cur = cur->h_next;
		}
	}
	return 0;
}
Example 2
void LevelCorrection::RebuildOutputDlg()
{
	//clear
	cvSet(m_output_sld_img, cvScalar(255,255,255));

	int npts = 3; 
	CvPoint** pts = (CvPoint**) cvAlloc(sizeof(CvPoint*) * 1);
	pts[0] = (CvPoint*) cvAlloc(sizeof(CvPoint) * 3);

	//min slider
	pts[0][0].x = m_min_out_val - LC_SLD_RAD;
	pts[0][0].y = LC_OUTPUT_SLD_H-1;
	pts[0][1].x = m_min_out_val;
	pts[0][1].y = 0;
	pts[0][2].x = m_min_out_val + LC_SLD_RAD;
	pts[0][2].y = LC_OUTPUT_SLD_H-1;
	cvFillPoly(m_output_sld_img, pts, &npts, 1, LC_OUTPUT_MIN_SLD_COLOR, CV_AA, 0);
	cvPolyLine(m_output_sld_img, pts, &npts, 1, 1, CV_RGB(0,0,0), 1, CV_AA, 0);

	//max slider
	pts[0][0].x = m_max_out_val - LC_SLD_RAD;
	pts[0][1].x = m_max_out_val;
	pts[0][2].x = m_max_out_val + LC_SLD_RAD;
	cvFillPoly(m_output_sld_img, pts, &npts, 1, LC_OUTPUT_MAX_SLD_COLOR, CV_AA, 0);
	cvPolyLine(m_output_sld_img, pts, &npts, 1, 1, CV_RGB(0,0,0), 1, CV_AA, 0);

	cvFree(&(pts[0]));
	cvFree(&pts);
}
Example 3
void drawFillPoly( ipl_image_wrapper& ipl_image
                   , const curve_vec_t& curves
                   , PIXEL              color     )
{
    const std::size_t num_curves = curves.size();

    boost::scoped_array<int> num_points_per_curve( new int[num_curves] );

    for( std::size_t i = 0; i < num_curves; ++i )
    {
        num_points_per_curve[i] = static_cast<int>( curves[i].size() );
    }

    // The curve array vector will deallocate all memory by itself.
    cvpoint_array_vec_t pp( num_curves );

    boost::scoped_array<CvPoint*> curve_array( new CvPoint*[num_curves] ); // was a raw new[] that leaked

    for( std::size_t i = 0; i < num_curves; ++i )
    {
        pp[i] = make_cvPoint_array( curves[i] );

        curve_array[i] = pp[i].get();
    }

    cvFillPoly( ipl_image.get()
                , curve_array.get()          // needs to be a pointer to a C array of CvPoint arrays.
                , num_points_per_curve.get() // int array holding the number of points in each curve.
                , static_cast<int>( num_curves )
                , make_cvScalar( color ));
}
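For reference, the wrapper above ultimately reduces to a single cvFillPoly call; a minimal standalone sketch of that call (plain OpenCV 1.x C API, independent of the wrapper types above, with made-up coordinates):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
    IplImage* img = cvCreateImage( cvSize(200, 200), IPL_DEPTH_8U, 3 );
    cvZero( img );

    CvPoint tri[3]  = { {20, 20}, {180, 40}, {60, 160} };
    CvPoint quad[4] = { {100, 100}, {190, 110}, {190, 190}, {110, 180} };
    CvPoint* curves[2] = { tri, quad };
    int npts[2] = { 3, 4 };

    // One call fills both contours; where contours overlap, cvFillPoly
    // applies an even-odd style rule, so overlapping areas become holes.
    cvFillPoly( img, curves, npts, 2, CV_RGB(0, 255, 0) );

    cvSaveImage( "filled.png", img );
    cvReleaseImage( &img );
    return 0;
}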
Example 4
int main(int argc, char* argv[])
{
	// sample point data
	CvPoint *szPoint1 = new CvPoint[5];
	szPoint1[0] = { 78, 65 };
	szPoint1[1] = { 45, 164 };
	szPoint1[2] = { 102, 234 };
	szPoint1[3] = { 368, 173 };
	szPoint1[4] = { 126, 32 };

	CvPoint szPoint2[6] = { { 89, 323 }, { 0, 134 }, { 171, 262 }, { 126, 32 }, { 85, 85 }, { 75, 69 } }; // unused below
	CvPoint szPoint3[7] = { { 78, 65 }, { 102, 234 }, { 368, 173 }, { 126, 32 }, { 12, 56 }, { 320, 172 }, { 95, 32 } }; // unused below

	CvPoint *pSzArray[] = { szPoint1};
	int nArrayNumber[] = { 5 };

	// create the matrix; nMatWidth/nMatHeight are undefined in the original
	// snippet, so plausible values are assumed here (note cvCreateMat takes rows first)
	const int nMatWidth = 500, nMatHeight = 400;
	CvMat *Mat = cvCreateMat(nMatHeight, nMatWidth, CV_8UC3);
	assert(Mat != NULL);
	cvZero(Mat); // start from a black canvas; cvCreateMat leaves the data uninitialized

	// draw the filled polygon
	cvFillPoly(Mat, pSzArray, nArrayNumber, 1, CV_RGB(255, 0, 0));

	CvFont font;
	char buf_T[256];
	cvInitFont(&font, CV_FONT_VECTOR0, 0.65, 0.65, 0, 2, 2);
	for (int i = 0; i < 5; i++)
	{
		sprintf_s(buf_T, 256, "%d", i + 1);
		cvPutText(Mat, buf_T, szPoint1[i], &font, CV_RGB(255, 0, 255));
		cvCircle(Mat,szPoint1[i],4,CV_RGB(0,255,0),-1);
	}

	delete[] szPoint1;
	
	// show the image
	cvNamedWindow("Show_Result");
	cvShowImage("Show_Result", Mat);

	cvWaitKey();

	// release resources
	cvReleaseMat(&Mat);
	cvDestroyWindow("Show_Resulst");

	return 0;
} // ConsoleApplication1.cpp : entry point of the console application
Example 5
/*
 * Creates an illumination pattern
 * according to an illumination montage and the location of a segmented worm.
 *
 * To use with a protocol, call GetMontageFromProtocolInterp() first.
 *
 * FlipLR is a bool. When set to 1, the illumination pattern is reflected across the worm's centerline.
 */
void IllumWorm(SegmentedWorm* segworm, CvSeq* IllumMontage, IplImage* img,CvSize gridSize, int FlipLR){
	int DEBUG=0;
	if (DEBUG) printf("In IllumWorm()\n");
	CvPoint* polyArr=NULL;
	int k;
	int numpts=0;
	for (k = 0; k < IllumMontage->total; ++k) {

		numpts=CreatePointArrFromMontage(&polyArr,IllumMontage,k);
		//ReturnHere
		int j;
		//DisplayPtArr(polyArr,numpts);
		CvPoint* ptPtr=polyArr;
		for (j = 0; j < numpts; ++j) {
			/** make a local copy of the current pt in worm space **/
			CvPoint wormPt=*(ptPtr);
			/** replace that point with the new pt in image space **/
			*(ptPtr)=CvtPtWormSpaceToImageSpace(wormPt,segworm, gridSize,FlipLR);
			/** move to the next pointer **/
			ptPtr++;
		}



		if (DEBUG) {
			int i;
			printf("new polygon\n");
			for (i = 0; i < numpts; i++) {
				printf(" (%d, %d)\n",polyArr[i].x,polyArr[i].y);
				cvCircle(img, polyArr[i], 1, cvScalar(255, 255, 255), 1);
				cvShowImage("Debug",img);
				cvWaitKey(10);
			}

		}


		/** Actually draw the polygon **/
		cvFillPoly(img,&polyArr,&numpts,1,cvScalar(255,255,255),8);

		free(polyArr);
		polyArr=NULL;
	}

	if (DEBUG)	{
		IplImage* TempImage=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
		DrawSequence(&TempImage,segworm->LeftBound);
		DrawSequence(&TempImage, segworm->RightBound);
		double weighting=0.4;
		cvAddWeighted(img,weighting,TempImage,1,0,TempImage);
		cvShowImage("Debug",TempImage);
	}

}
Example 6
//--------------------------------------------------------------------------------
void  ofxCvGrayscaleAdvanced::drawBlobIntoMe( ofxCvBlob &blob, int color ) {
	if( blob.pts.size() > 0 ) {
		CvPoint* pts = new CvPoint[blob.nPts];
		for( int i=0; i < blob.nPts ; i++ ) {
			pts[i].x = (int)blob.pts[i].x;
			pts[i].y = (int)blob.pts[i].y;
		}
		int nPts = blob.nPts;
		cvFillPoly( cvImage, &pts, &nPts, 1,
				   CV_RGB(color,color,color) );
		delete[] pts; // array form matches new CvPoint[...]
	}
}
Example 7
//---------------------------------------------------------------------------------
// useful for checking for occlusion, etc. with another image
// (via image arithmetic: &=, etc.)
void videoBlob::draw(ofxCvGrayscaleImage &mom, int color){

	if (nPts > 0 ){
	   CvPoint * ptsTemp = new CvPoint[nPts];
	   for (int i = 0; i < nPts ; i++){
	           ptsTemp[i].x = pts[i].x;
	           ptsTemp[i].y = pts[i].y;
	   }
	   cvFillPoly( mom.getCvImage(), &ptsTemp, &(nPts),1,
	           CV_RGB(color,color,color));
	   delete[] ptsTemp; // array form matches new CvPoint[...]

	}
}
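As the comment above suggests, the filled blob works as a mask for image arithmetic; a minimal sketch of an occlusion-style check (hypothetical single-channel images mask and other of equal size, plain OpenCV 1.x API):

// 'mask' holds the filled blob (255 inside), 'other' is the image to test against.
IplImage* overlap = cvCreateImage( cvGetSize(mask), IPL_DEPTH_8U, 1 );
cvAnd( mask, other, overlap );                 // non-zero only where both are non-zero
int overlapPixels = cvCountNonZero( overlap ); // crude measure of how much overlaps
cvReleaseImage( &overlap );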
Example 8
void LevelCorrection::RebuildLevelDlg()
{
	//clear
	cvSet(m_level_sld_img, cvScalar(255,255,255));

	int npts = 3; 
	CvPoint** pts = (CvPoint**) cvAlloc(sizeof(CvPoint*) * 1);
	pts[0] = (CvPoint*) cvAlloc(sizeof(CvPoint) * 3);

	//min slider
	pts[0][0].x = m_min_level - LC_SLD_RAD;
	pts[0][0].y = LC_LEVEL_SLD_H-1;
	pts[0][1].x = m_min_level;
	pts[0][1].y = 0;
	pts[0][2].x = m_min_level + LC_SLD_RAD;
	pts[0][2].y = LC_LEVEL_SLD_H-1;
	cvFillPoly(m_level_sld_img, pts, &npts, 1, LC_LEVEL_MIN_SLD_COLOR, 8, 0);
	cvPolyLine(m_level_sld_img, pts, &npts, 1, 1, CV_RGB(0,0,0), 1, 8, 0);

	//max slider
	pts[0][0].x = m_max_level - LC_SLD_RAD;
	pts[0][1].x = m_max_level;
	pts[0][2].x = m_max_level + LC_SLD_RAD;
	cvFillPoly(m_level_sld_img, pts, &npts, 1, LC_LEVEL_MAX_SLD_COLOR, 8, 0);
	cvPolyLine(m_level_sld_img, pts, &npts, 1, 1, CV_RGB(0,0,0), 1, 8, 0);

	//gamma slider
	int gamma_x = (int)(m_gamma*(m_max_level - m_min_level) + m_min_level + 0.5);
	pts[0][0].x = gamma_x - LC_SLD_RAD;
	pts[0][1].x = gamma_x;
	pts[0][2].x = gamma_x + LC_SLD_RAD;
	cvFillPoly(m_level_sld_img, pts, &npts, 1, LC_LEVEL_GAM_SLD_COLOR, 8, 0);
	cvPolyLine(m_level_sld_img, pts, &npts, 1, 1, CV_RGB(0,0,0), 1, 8, 0);

	cvFree(&(pts[0]));
	cvFree(&pts);
}
Example 9
int main( int argc, char** argv )
{
	/* data structure for the image */
	IplImage *img = 0;

	/* check for supplied argument */
	if( argc < 2 ) {
		fprintf( stderr, "Usage: loadimg <filename>\n" );
		return 1;
	}
	/* load the image,
	use CV_LOAD_IMAGE_GRAYSCALE to load the image in grayscale */
	img = cvLoadImage( argv[1], CV_LOAD_IMAGE_COLOR );

	/* always check */
	if( img == 0 ) {
		fprintf( stderr, "Cannot load file %s!\n", argv[1] );
		return 1;
	}

	/* create a window */ 
	cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );


//	CvPoint  curve1[]={10,10,  10,100,  100,100,  100,10};
//	CvPoint  curve2[]={30,30,  30,130,  130,130,  130,30,  150,10};
//	CvPoint* curveArr[2]={curve1, curve2};
	CvPoint  curve2[]={{0,60}, {50,50}, {60,60}};
	CvPoint* curveArr[1]={curve2};
	int      nCurvePts[1]={3};
	int      nCurves=1;
	int      isCurveClosed=1;
	int      lineWidth=15;

	//cvPolyLine(img,curveArr,nCurvePts,nCurves,isCurveClosed,cvScalar(0,255,255),lineWidth);
	cvFillPoly(img,curveArr,nCurvePts,nCurves,cvScalar(0,255,255));

	/* display the image */  
	cvShowImage( "image", img );

	/* wait until user press a key */
	cvWaitKey(0);

	/* free memory */
	cvDestroyWindow( "image" );

	cvReleaseImage( &img );
	return 0;
}
Example 10
//--------------------------------------------------------------------------------
void  ofxCvImage::drawBlobIntoMe( ofxCvBlob& blob, int color ) {
	if( !bAllocated ){
		ofLogError("ofxCvImage") << "drawBlobIntoMe(): image not allocated";
		return;
	}
	if( blob.pts.size() > 0 ) {
	   CvPoint* pts = new CvPoint[blob.nPts];
	   for( int i=0; i < blob.nPts ; i++ ) {
		   pts[i].x = (int)blob.pts[i].x;
		   pts[i].y = (int)blob.pts[i].y;
	   }
	   int nPts = blob.nPts;
	   cvFillPoly( cvImage, &pts, &nPts, 1,
				   CV_RGB(color,color,color) );
	   delete[] pts;
	}
}
Example 11
//--------------------------------------------------------------------------------
void  ofxCvImage::drawBlobIntoMe( ofxCvBlob& blob, int color ) {
	if( !bAllocated ){
		ofLog(OF_LOG_ERROR, "in drawBlobIntoMe, need to allocate image first");
		return;
	}
	if( blob.pts.size() > 0 ) {
	   CvPoint* pts = new CvPoint[blob.nPts];
	   for( int i=0; i < blob.nPts ; i++ ) {
		   pts[i].x = (int)blob.pts[i].x;
		   pts[i].y = (int)blob.pts[i].y;
	   }
	   int nPts = blob.nPts;
	   cvFillPoly( cvImage, &pts, &nPts, 1,
				   CV_RGB(color,color,color) );
	   delete[] pts; // array form matches new CvPoint[...]
	}
}
Example 12
//--------------------------------------------------------------
void DepthHoleFiller::computeBlobImage (){
	
	ofxCv8uC1_Blobs.set(0); // clear any previous blobs.
	CvScalar color_white = CV_RGB(255,255,255);
	
	int nHolesToFill = contourFinder.blobs.size();
	for (int i=0; i<nHolesToFill; i++){
		ofxCvBlob blob = contourFinder.blobs.at(i);
		int nPts = MIN(blob.nPts, MAX_N_CONTOUR_POINTS);
		
		for (int j=0; j<nPts; j++){ 
			ofPoint pt = blob.pts.at(j);
			int px = (int) pt.x; // 0...imgw
			int py = (int) pt.y; // 0...imgh
			cvpts[0][j].x = px;
			cvpts[0][j].y = py;
		}
		ncvpts[0] = nPts;
		cvFillPoly (ofxCv8uC1_Blobs.getCvImage(), cvpts, ncvpts, 1, color_white, 8, 0 );
	}
}
Example 13
static int aPyrSegmentation(void* agr)
{
    CvPoint _cp[] ={{33,33}, {43,33}, {43,43}, {33,43}};
    CvPoint _cp2[] ={{50,50}, {70,50}, {70,70}, {50,70}};
    CvPoint* cp = _cp;
    CvPoint* cp2 = _cp2;
    CvConnectedComp *dst_comp[3];
    CvRect rect[3] = {{50,50,21,21}, {0,0,128,128}, {33,33,11,11}};
    double a[3] = {441.0, 15822.0, 121.0};

/*    ippiPoint cp3[] ={130,130, 150,130, 150,150, 130,150};  */
/*	CvPoint cp[] ={0,0, 5,5, 5,0, 10,5, 10,0, 15,5, 15,0};  */
    int chanels = (int)(size_t)agr;    /* number of color channels */
    int width = 128;
    int height = 128;
    int nPoints = 4;
    int block_size = 1000;
    int color1 = 30, color2 = 110, color3 = 180;
    int level = 5;
    long diff, l;
    int code;

    CvMemStorage *storage;   /*   storage for connected component writing  */
    CvSeq *comp;

    double lower, upper;
    unsigned seed;
    char rand;
    AtsRandState state;
    int i,j;

    IplImage *image, *image_f, *image_s;
    CvSize size;
    uchar *f_cur, *f_row;
    uchar *row;
    uchar *cur;
    int threshold1, threshold2;

    code = TRS_OK;

    if(chanels != 1 && chanels != 3)
        return TRS_UNDEF;
/* read tests params */

    if(!trsiRead( &width, "128", "image width" ))
        return TRS_UNDEF;
    if(!trsiRead( &height, "128", "image height" ))
        return TRS_UNDEF;
    if(!trsiRead( &level, "5", "pyramid level" ))
        return TRS_UNDEF;


/*  create Image   */
    l = width*height;
    size.width = width;
    size.height = height;

    rect[1].height = height;
    rect[1].width = width;
    a[1] = l - a[0] - a[2];

    image = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 
    image_s = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 

    memset(image->imageData, color1, chanels*l);

    image_f = cvCreateImage(cvSize(size.width, size.height), IPL_DEPTH_8U, chanels); 

    OPENCV_CALL( storage = cvCreateMemStorage( block_size ) );

/*  do noise   */
    upper = 20;
    lower = -upper;
    seed = 345753;
    atsRandInit( &state, lower, upper, seed );

/*   segmentation by pyramid     */    
    threshold1 = 50;
    threshold2 = 50;

    switch(chanels)
    {
        case 1:
        {
            cvFillPoly( image, &cp, &nPoints, 1, color2);
            cvFillPoly( image, &cp2, &nPoints, 1, color3); 

            row = (uchar*)image->imageData;
            f_row = (uchar*)image_f->imageData;
            for(i = 0; i<size.height; i++)
            {
                cur = row;
                f_cur = f_row;
                for(j = 0; j<size.width; j++)
                {
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur)=(uchar)((*cur) + rand);
                    cur++;
                    f_cur++;
                }
                row+=image->widthStep;
                f_row+=image_f->widthStep;
            }

            cvPyrSegmentation( image_f, image_s,
                               storage, &comp, 
                               level, threshold1, threshold2 );

            //if(comp->total != 3) { code = TRS_FAIL; goto exit; }
/*  read the connected components     */
            /*dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
            dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
            dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );*/
            break;
        }
        case 3:
        {
            cvFillPoly( image, &cp, &nPoints, 1, CV_RGB(color2,color2,color2));
            cvFillPoly( image, &cp2, &nPoints, 1, CV_RGB(color3,color3,color3)); 

            row = (uchar*)image->imageData;
            f_row = (uchar*)image_f->imageData;
            for(i = 0; i<size.height; i++)
            {
                cur = row;
                f_cur = f_row;
                for(j = 0; j<size.width; j++)
                {
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur)=(uchar)((*cur) + rand);
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur+1)=(uchar)(*(cur+1) + rand);
                    atsbRand8s( &state, &rand, 1);
                    *(f_cur+2)=(uchar)(*(cur+2) + rand);
                    cur+=3;
                    f_cur+=3;
                }
                row+=image->widthStep;
                f_row+=image_f->widthStep;
            }

            cvPyrSegmentation(image_f, image_s, storage, &comp, level,
                              threshold1, threshold2);   
/*  read the connected components     */
            if(comp->total != 3) { code = TRS_FAIL; goto exit; }
            dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
            dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
            dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );
            break;
        }
    }
 
    diff = 0;
    /*diff = atsCompare1Db( (uchar*)image->imageData, (uchar*)image_s->imageData, chanels*l, 4);
 
    for(i = 0; i < 3; i++)
    {
        if(dst_comp[i]->area != a[i]) diff++;
        if(dst_comp[i]->rect.x != rect[i].x) diff++;
        if(dst_comp[i]->rect.y != rect[i].y) diff++;
        if(dst_comp[i]->rect.width != rect[i].width) diff++;
        if(dst_comp[i]->rect.height != rect[i].height) diff++;
    }*/

    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "upper =%f diff =%ld \n",upper, diff);

    if(diff > 0 )
        code = TRS_FAIL;
    else
        code = TRS_OK;

exit:

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&image_f);
    cvReleaseImage(&image);
    cvReleaseImage(&image_s);

   

/*    trsFree(cp);  */
/*    _getch();     */
    return code;
 
    
}
Example 14
static int  aAdaptThreshold()
{

    CvPoint *cp;
    int parameter1 = 3;
    double parameter2 = 10;
    int width = 128;
    int height = 128;
    int kp = 5;
    int nPoints2 = 20;

    int fi = 0;
    int a2 = 20;
    int b2 = 25,xc,yc;

    double pi = 3.1415926;

    double lower, upper;
    unsigned seed;
    char rand;
    AtsRandState state;

    long diff_binary, diff_binary_inv;
    
    int l,i,j;

    IplImage *imBinary, *imBinary_inv, *imTo_zero, *imTo_zero_inv, *imInput, *imOutput;
    CvSize size;

    int code = TRS_OK;

//  read tests params 
    if(!trsiRead( &width, "128", "image width" ))
        return TRS_UNDEF;
    if(!trsiRead( &height, "128", "image height" ))
        return TRS_UNDEF;

//  initialized image
    l = width*height*sizeof(uchar);

    cp = (CvPoint*) trsmAlloc(nPoints2*sizeof(CvPoint));

    xc = (int)( width/2.);
    yc = (int)( height/2.);

    kp = nPoints2;

    size.width = width;
    size.height = height;

    int xmin = width;
    int ymin = height;
    int xmax = 0;
    int ymax = 0;
    
    
    for(i=0;i<nPoints2;i++)
    {
        cp[i].x = (int)(a2*cos(2*pi*i/nPoints2)*cos(2*pi*fi/360.))-
        (int)(b2*sin(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+xc;
        if(xmin> cp[i].x) xmin = cp[i].x;
        if(xmax< cp[i].x) xmax = cp[i].x;
        cp[i].y = (int)(a2*cos(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+
                    (int)(b2*sin(2*pi*i/nPoints2)*cos(2*pi*fi/360.))+yc;
        if(ymin> cp[i].y) ymin = cp[i].y;
        if(ymax< cp[i].y) ymax = cp[i].y;
    }

    if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;
    
//  IPL image moment calculation  
//  create image  
    imBinary = cvCreateImage( size, 8, 1 );
    imBinary_inv = cvCreateImage( size, 8, 1 );
    imTo_zero = cvCreateImage( size, 8, 1 );
    imTo_zero_inv = cvCreateImage( size, 8, 1 );
    imOutput = cvCreateImage( size, 8, 1 );
    imInput = cvCreateImage( size, 8, 1 );

    int bgrn = 50;
    int signal = 150;
    
    memset(imInput->imageData,bgrn,l);

    cvFillPoly(imInput, &cp, &kp, 1, cvScalarAll(signal));

//  do noise   
    upper = 22;
    lower = -upper;
    seed = 345753;
    atsRandInit( &state, lower, upper, seed );
    
    uchar *input = (uchar*)imInput->imageData;
    uchar *binary = (uchar*)imBinary->imageData;
    uchar *binary_inv = (uchar*)imBinary_inv->imageData;
    uchar *to_zero = (uchar*)imTo_zero->imageData;
    uchar *to_zero_inv = (uchar*)imTo_zero_inv->imageData;
    double *parameter = (double*)trsmAlloc(2*sizeof(double));

    int step = imInput->widthStep;

    for(i = 0; i<size.height; i++, input+=step, binary+=step, binary_inv+=step, to_zero+=step,to_zero_inv+=step)
    {
         for(j = 0; j<size.width; j++)
         {
                atsbRand8s( &state, &rand, 1);   
                if(input[j] == bgrn) 
                {
                    binary[j] = to_zero[j] = (uchar)0;
                    binary_inv[j] = (uchar)255;
                    to_zero_inv[j] = input [j] = (uchar)(bgrn + rand);
                }
                else 
                {
                    binary[j] = (uchar)255;
                    binary_inv[j] = to_zero_inv[j] = (uchar)0;
                    to_zero[j] = input[j] = (uchar)(signal + rand);
                }
        
         }
    }



    cvAdaptiveThreshold( imInput, imOutput, (double)255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, parameter1, parameter2 ); 
    diff_binary = atsCompare1Db( (uchar*)imOutput->imageData, (uchar*)imBinary->imageData, l, 5);

    cvAdaptiveThreshold( imInput, imOutput, (double)255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, parameter1, parameter2 ); 
    diff_binary_inv = atsCompare1Db( (uchar*)imOutput->imageData, (uchar*)imBinary_inv->imageData, l, 5);

    if( diff_binary > 5 || diff_binary_inv > 5 )
        code = TRS_FAIL;  
    
    cvReleaseImage(&imInput);
    cvReleaseImage(&imOutput);
    cvReleaseImage(&imBinary);
    cvReleaseImage(&imBinary_inv);
    cvReleaseImage(&imTo_zero);
    cvReleaseImage(&imTo_zero_inv);

    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "diff_binary =%ld \n", diff_binary); 
    trsWrite( ATS_CON | ATS_LST | ATS_SUM, "diff_binary_inv =%ld \n", diff_binary_inv); 

    trsFree(parameter);
    trsFree(cp);
    return code;
}
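The test above exercises cvAdaptiveThreshold on a synthetic noisy ellipse; stripped of the test harness, a minimal usage sketch looks like this (assumes an 8-bit single-channel gray.png exists):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
    IplImage* src = cvLoadImage( "gray.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !src ) return 1;

    IplImage* dst = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1 );

    // block_size (here 3) must be odd and greater than 1; param1 (here 10)
    // is subtracted from the local mean before comparing each pixel.
    cvAdaptiveThreshold( src, dst, 255,
                         CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 3, 10 );

    cvSaveImage( "binary.png", dst );
    cvReleaseImage( &dst );
    cvReleaseImage( &src );
    return 0;
}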
Example 15
static GstFlowReturn
kms_crowd_detector_transform_frame_ip (GstVideoFilter * filter,
    GstVideoFrame * frame)
{
  KmsCrowdDetector *crowddetector = KMS_CROWD_DETECTOR (filter);
  GstMapInfo info;

  kms_crowd_detector_initialize_images (crowddetector, frame);
  if ((crowddetector->priv->num_rois == 0)
      && (crowddetector->priv->rois != NULL)) {
    kms_crowd_detector_extract_rois (crowddetector);
  }
  if (crowddetector->priv->pixels_rois_counted == TRUE &&
      crowddetector->priv->actual_image != NULL) {
    kms_crowd_detector_count_num_pixels_rois (crowddetector);
    crowddetector->priv->pixels_rois_counted = FALSE;
  }
  gst_buffer_map (frame->buffer, &info, GST_MAP_READ);
  crowddetector->priv->actual_image->imageData = (char *) info.data;

  IplImage *frame_actual_gray =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (frame_actual_gray);

  IplImage *actual_lbp =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (actual_lbp);

  IplImage *lbp_temporal_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (lbp_temporal_result);

  IplImage *add_lbps_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (add_lbps_result);

  IplImage *lbps_alpha_result_rgb =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 3);

  cvSet (lbps_alpha_result_rgb, CV_RGB (0, 0, 0), 0);

  IplImage *actual_image_masked =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);

  cvZero (actual_image_masked);

  IplImage *substract_background_to_actual =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (substract_background_to_actual);

  IplImage *low_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (low_speed_map);

  IplImage *high_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (high_speed_map);

  IplImage *actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 3);

  cvSet (actual_motion, CV_RGB (0, 0, 0), 0);

  IplImage *binary_actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (binary_actual_motion);

  uint8_t *low_speed_pointer;
  uint8_t *low_speed_pointer_aux;
  uint8_t *high_speed_pointer;
  uint8_t *high_speed_pointer_aux;
  uint8_t *actual_motion_pointer;
  uint8_t *actual_motion_pointer_aux;
  uint8_t *binary_actual_motion_pointer;
  uint8_t *binary_actual_motion_pointer_aux;

  int w, h;

  if (crowddetector->priv->num_rois != 0) {
    cvFillPoly (actual_image_masked, crowddetector->priv->curves,
        crowddetector->priv->n_points, crowddetector->priv->num_rois,
        cvScalar (255, 255, 255, 0), CV_AA, 0);
  }
  cvCvtColor (crowddetector->priv->actual_image, frame_actual_gray,
      CV_BGR2GRAY);
  kms_crowd_detector_mask_image (frame_actual_gray, actual_image_masked, 0);

  if (crowddetector->priv->background == NULL) {
    /* first frame: allocate the background model (cvCopy into NULL would crash) */
    crowddetector->priv->background = cvCloneImage (frame_actual_gray);
  } else {
    cvAddWeighted (crowddetector->priv->background, BACKGROUND_ADD_RATIO,
        frame_actual_gray, 1 - BACKGROUND_ADD_RATIO, 0,
        crowddetector->priv->background);
  }

  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray, actual_lbp,
      actual_lbp, FALSE);
  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray,
      lbp_temporal_result, crowddetector->priv->frame_previous_gray, TRUE);
  cvAddWeighted (crowddetector->priv->previous_lbp, LBPS_ADD_RATIO, actual_lbp,
      (1 - LBPS_ADD_RATIO), 0, add_lbps_result);
  cvSub (crowddetector->priv->previous_lbp, actual_lbp, add_lbps_result, 0);
  cvThreshold (add_lbps_result, add_lbps_result, 70.0, 255.0, CV_THRESH_OTSU);
  cvNot (add_lbps_result, add_lbps_result);
  cvErode (add_lbps_result, add_lbps_result, 0, 4);
  cvDilate (add_lbps_result, add_lbps_result, 0, 11);
  cvErode (add_lbps_result, add_lbps_result, 0, 3);
  cvCvtColor (add_lbps_result, lbps_alpha_result_rgb, CV_GRAY2BGR);
  cvCopy (actual_lbp, crowddetector->priv->previous_lbp, 0);
  cvCopy (frame_actual_gray, crowddetector->priv->frame_previous_gray, 0);

  if (crowddetector->priv->acumulated_lbp == NULL) {
    /* first frame: allocate the accumulator (cvCopy into NULL would crash) */
    crowddetector->priv->acumulated_lbp = cvCloneImage (add_lbps_result);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_lbp, TEMPORAL_LBPS_ADD_RATIO,
        add_lbps_result, 1 - TEMPORAL_LBPS_ADD_RATIO, 0,
        crowddetector->priv->acumulated_lbp);
  }

  cvThreshold (crowddetector->priv->acumulated_lbp, high_speed_map,
      150.0, 255.0, CV_THRESH_BINARY);
  cvSmooth (high_speed_map, high_speed_map, CV_MEDIAN, 3, 0, 0.0, 0.0);
  kms_crowd_detector_substract_background (frame_actual_gray,
      crowddetector->priv->background, substract_background_to_actual);
  cvThreshold (substract_background_to_actual, substract_background_to_actual,
      70.0, 255.0, CV_THRESH_OTSU);

  cvCanny (substract_background_to_actual,
      substract_background_to_actual, 70.0, 150.0, 3);

  if (crowddetector->priv->acumulated_edges == NULL) {
    /* first frame: allocate the accumulator (cvCopy into NULL would crash) */
    crowddetector->priv->acumulated_edges =
        cvCloneImage (substract_background_to_actual);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_edges, EDGES_ADD_RATIO,
        substract_background_to_actual, 1 - EDGES_ADD_RATIO, 0,
        crowddetector->priv->acumulated_edges);
  }

  kms_crowd_detector_process_edges_image (crowddetector, low_speed_map, 3);
  cvErode (low_speed_map, low_speed_map, 0, 1);

  low_speed_pointer = (uint8_t *) low_speed_map->imageData;
  high_speed_pointer = (uint8_t *) high_speed_map->imageData;
  actual_motion_pointer = (uint8_t *) actual_motion->imageData;
  binary_actual_motion_pointer = (uint8_t *) binary_actual_motion->imageData;

  for (h = 0; h < low_speed_map->height; h++) {
    low_speed_pointer_aux = low_speed_pointer;
    high_speed_pointer_aux = high_speed_pointer;
    actual_motion_pointer_aux = actual_motion_pointer;
    binary_actual_motion_pointer_aux = binary_actual_motion_pointer;
    for (w = 0; w < low_speed_map->width; w++) {
      if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      }
      if (*low_speed_pointer_aux == 255) {
        *actual_motion_pointer_aux = 0;
        actual_motion_pointer_aux[2] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      } else if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
      }
      low_speed_pointer_aux++;
      high_speed_pointer_aux++;
      actual_motion_pointer_aux = actual_motion_pointer_aux + 3;
      binary_actual_motion_pointer_aux++;
    }
    low_speed_pointer += low_speed_map->widthStep;
    high_speed_pointer += high_speed_map->widthStep;
    actual_motion_pointer += actual_motion->widthStep;
    binary_actual_motion_pointer += binary_actual_motion->widthStep;
  }

  int curve;

  for (curve = 0; curve < crowddetector->priv->num_rois; curve++) {

    if (crowddetector->priv->rois_data[curve].send_optical_flow_event == TRUE) {

      CvRect container =
          kms_crowd_detector_get_square_roi_contaniner (crowddetector, curve);

      cvSetImageROI (crowddetector->priv->actual_image, container);
      cvSetImageROI (crowddetector->priv->previous_image, container);
      cvSetImageROI (actual_motion, container);

      kms_crowd_detector_compute_optical_flow (crowddetector,
          binary_actual_motion, container, curve);

      cvResetImageROI (crowddetector->priv->actual_image);
      cvResetImageROI (crowddetector->priv->previous_image);
      cvResetImageROI (actual_motion); /* was never reset, breaking the overlay loop below */
    }
  }

  {
    uint8_t *orig_row_pointer =
        (uint8_t *) crowddetector->priv->actual_image->imageData;
    uint8_t *overlay_row_pointer = (uint8_t *) actual_motion->imageData;

    for (h = 0; h < crowddetector->priv->actual_image->height; h++) {
      uint8_t *orig_column_pointer = orig_row_pointer;
      uint8_t *overlay_column_pointer = overlay_row_pointer;

      for (w = 0; w < crowddetector->priv->actual_image->width; w++) {
        int c;

        for (c = 0; c < crowddetector->priv->actual_image->nChannels; c++) {
          if (overlay_column_pointer[c] != 0) {
            orig_column_pointer[c] = overlay_column_pointer[c];
          }
        }

        orig_column_pointer += crowddetector->priv->actual_image->nChannels;
        overlay_column_pointer += actual_motion->nChannels;
      }
      orig_row_pointer += crowddetector->priv->actual_image->widthStep;
      overlay_row_pointer += actual_motion->widthStep;
    }
  }

  if (crowddetector->priv->num_rois != 0) {
    cvPolyLine (crowddetector->priv->actual_image, crowddetector->priv->curves,
        crowddetector->priv->n_points, crowddetector->priv->num_rois, 1,
        cvScalar (255, 255, 255, 0), 1, 8, 0);
  }

  cvNot (high_speed_map, high_speed_map);
  kms_crowd_detector_roi_analysis (crowddetector, low_speed_map,
      high_speed_map);

  cvReleaseImage (&frame_actual_gray);
  cvReleaseImage (&actual_lbp);
  cvReleaseImage (&lbp_temporal_result);
  cvReleaseImage (&add_lbps_result);
  cvReleaseImage (&lbps_alpha_result_rgb);
  cvReleaseImage (&actual_image_masked);
  cvReleaseImage (&substract_background_to_actual);
  cvReleaseImage (&low_speed_map);
  cvReleaseImage (&high_speed_map);
  cvReleaseImage (&actual_motion);
  cvReleaseImage (&binary_actual_motion);

  gst_buffer_unmap (frame->buffer, &info);

  return GST_FLOW_OK;
}
Example 16
static int drawing_test()
{
    static int read_params = 0;
    static int read = 0;
    const int channel = 3;
    CvSize size = cvSize(600, 300);

    int i, j;
    int Errors = 0;

    if( !read_params )
    {
        read_params = 1;

        trsCaseRead( &read, "/n/y", "y", "Read from file ?" );
    }
    // Create image
    IplImage* image = cvCreateImage( size, IPL_DEPTH_8U, channel );

    // cvLine
    cvZero( image );

    for( i = 0; i < 100; i++ )
    {
        CvPoint p1 = cvPoint( i - 30, i * 4 + 10 );
        CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 );

        cvLine( image, p1, p2, CV_RGB(178+i, 255-i, i), i % 10 );
    }
    Errors += ProcessImage( image, "cvLine", read );

    // cvLineAA
    cvZero( image );

    for( i = 0; i < 100; i++ )
    {
        CvPoint p1 = cvPoint( i - 30, i * 4 + 10 );
        CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 );

        cvLine( image, p1, p2, CV_RGB(178+i, 255-i, i), 1, CV_AA, 0 );
    }
    //Errors += ProcessImage( image, "cvLineAA", read );

    // cvRectangle
    cvZero( image );

    for( i = 0; i < 100; i++ )
    {
        CvPoint p1 = cvPoint( i - 30, i * 4 + 10 );
        CvPoint p2 = cvPoint( size.width + 30 - i, size.height - 10 - i * 4 );

        cvRectangle( image, p1, p2, CV_RGB(178+i, 255-i, i), i % 10 );
    }
    Errors += ProcessImage( image, "cvRectangle", read );

#if 0
        named_window( "Diff", 0 );
#endif

    // cvCircle
    cvZero( image );

    for( i = 0; i < 100; i++ )
    {
        CvPoint p1 = cvPoint( i * 3, i * 2 );
        CvPoint p2 = cvPoint( size.width - i * 3, size.height - i * 2 );

        cvCircle( image, p1, i, CV_RGB(178+i, 255-i, i), i % 10 );
        cvCircle( image, p2, i, CV_RGB(178+i, 255-i, i), i % 10 );

#if 0
        show_iplimage( "Diff", image );
        wait_key(0);
#endif
    }
    Errors += ProcessImage( image, "cvCircle", read );

    // cvCircleAA
    cvZero( image );

    for( i = 0; i < 100; i++ )
    {
        CvPoint p1 = cvPoint( i * 3, i * 2 );
        CvPoint p2 = cvPoint( size.width - i * 3, size.height - i * 2 );

        cvCircleAA( image, p1, i, RGB(i, 255 - i, 178 + i), 0 );
        cvCircleAA( image, p2, i, RGB(i, 255 - i, 178 + i), 0 );
    }
    Errors += ProcessImage( image, "cvCircleAA", read );

    // cvEllipse
    cvZero( image );

    for( i = 10; i < 100; i += 10 )
    {
        CvPoint p1 = cvPoint( i * 6, i * 3 );
        CvSize axes = cvSize( i * 3, i * 2 );

        cvEllipse( image, p1, axes,
                   180 * i / 100, 90 * i / 100, 90 * (i - 100) / 100,
                   CV_RGB(178+i, 255-i, i), i % 10 );
    }
    Errors += ProcessImage( image, "cvEllipse", read );

    // cvEllipseAA
    cvZero( image );

    for( i = 10; i < 100; i += 10 )
    {
        CvPoint p1 = cvPoint( i * 6, i * 3 );
        CvSize axes = cvSize( i * 3, i * 2 );

        cvEllipseAA( image, p1, axes,
                   180 * i / 100, 90 * i / 100, 90 * (i - 100) / 100,
                   RGB(i, 255 - i, 178 + i), i % 10 );
    }
    Errors += ProcessImage( image, "cvEllipseAA", read );

    // cvFillConvexPoly
    cvZero( image );

    for( j = 0; j < 5; j++ )
        for( i = 0; i < 100; i += 10 )
        {
            CvPoint p[4] = {{ j * 100 - 10, i }, { j * 100 + 10, i },
                            { j * 100 + 30, i * 2 }, { j * 100 + 170, i * 3 }};
            cvFillConvexPoly( image, p, 4, CV_RGB(178+i, 255-i, i) );

        }
    Errors += ProcessImage( image, "cvFillConvexPoly", read );

    // cvFillPoly
    cvZero( image );

    for( i = 0; i < 100; i += 10 )
    {
        CvPoint p0[] = {{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}};
        CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}};
        CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}};
        CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}};
        CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}};

        CvPoint* p[] = {p0, p1, p2, p3, p4};

        int n[] = {4, 4, 4, 4, 4};
        cvFillPoly( image, p, n, 5, CV_RGB(178+i, 255-i, i) );
    }
    Errors += ProcessImage( image, "cvFillPoly", read );

    // cvPolyLine
    cvZero( image );

    for( i = 0; i < 100; i += 10 )
    {
        CvPoint p0[] = {{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}};
        CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}};
        CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}};
        CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}};
        CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}};

        CvPoint* p[] = {p0, p1, p2, p3, p4};

        int n[] = {4, 4, 4, 4, 4};
        cvPolyLine( image, p, n, 5, 1, CV_RGB(178+i, 255-i, i), i % 10 );
    }
    Errors += ProcessImage( image, "cvPolyLine", read );

    // cvPolyLineAA
    cvZero( image );

    for( i = 0; i < 100; i += 10 )
    {
        CvPoint p0[] = {{-10, i}, { 10, i}, { 30, i * 2}, {170, i * 3}};
        CvPoint p1[] = {{ 90, i}, {110, i}, {130, i * 2}, {270, i * 3}};
        CvPoint p2[] = {{190, i}, {210, i}, {230, i * 2}, {370, i * 3}};
        CvPoint p3[] = {{290, i}, {310, i}, {330, i * 2}, {470, i * 3}};
        CvPoint p4[] = {{390, i}, {410, i}, {430, i * 2}, {570, i * 3}};

        CvPoint* p[] = {p0, p1, p2, p3, p4};

        int n[] = {4, 4, 4, 4, 4};
        cvPolyLineAA( image, p, n, 5, 1, RGB(i, 255 - i, 178 + i), 0 );
    }
    Errors += ProcessImage( image, "cvPolyLineAA", read );

    // cvPutText
    cvZero( image );

    for( i = 1; i < 10; i++ )
    {
        CvFont font;
        cvInitFont( &font, CV_FONT_VECTOR0,
                    (double)i / 5, (double)i / 5, (double)i / 10, i );
        cvPutText( image, "privet. this is test. :)", cvPoint(0, i * 20), &font, CV_RGB(178+i, 255-i, i) );
    }
    Errors += ProcessImage( image, "cvPutText", read );

    cvReleaseImage( &image );

    return Errors ? trsResult( TRS_FAIL, "errors" ) : trsResult( TRS_OK, "ok" );
}
Example 17
void testApp::doVision() {
	float startTime = ofGetElapsedTimef();
	float img[VISION_WIDTH*VISION_HEIGHT];
	float *distPix = kinect.getDistancePixels();
	
	
	float kinectRangeFar = 4000;
	float kinectRangeNear = 500;
	
	
	float denom = (kinectRangeFar - kinectRangeNear) * (maxWaterDepth - waterThreshold);
	if(denom==0) denom = 0.0001;
	float subtractor = kinectRangeFar - waterThreshold*(kinectRangeFar - kinectRangeNear);
	
	for(int i = 0; i < VISION_WIDTH*VISION_HEIGHT; i++) {
		
		int x = i % VISION_WIDTH;
		int y = i / VISION_WIDTH;
		
		int ii = x*2 + y * 2 * KINECT_WIDTH;
		
		// this is slow
		//img[i] = ofMap(ofMap(distPix[i], 500, 4000, 1, 0), maxWaterDepth, waterThreshold, 1, 0);
		
		img[i] = (subtractor - distPix[ii])/denom;
		if(img[i]>1) img[i] = 0;
		if(img[i]<0) img[i] = 0;
	}
	depthImg.setFromPixels(img,VISION_WIDTH,VISION_HEIGHT);
	
	if(flipX || flipY) depthImg.mirror(flipY, flipX);
	
	rangeScaledImg = depthImg;
	
	for(int i = 0; i < blurIterations; i++) {
		rangeScaledImg.blur(2*blurSize+1);
	}
	if(erode) {
		rangeScaledImg.erode();
	}
	if(dilate) {
		rangeScaledImg.dilate();
	}
	
	
	
	if(accumulateBackground) {
		cvMax(bgImg.getCvImage(), rangeScaledImg.getCvImage(), bgImg.getCvImage());
		bgImg.flagImageChanged();
		
		
		backgroundAccumulationCount++;
		if(backgroundAccumulationCount>100) {
			accumulateBackground = false;
			backgroundAccumulationCount = 0;
		}
	}
	
	
	float *fgPix = rangeScaledImg.getPixelsAsFloats();
	float *bgPix = bgImg.getPixelsAsFloats();
	
	int numPix = VISION_WIDTH * VISION_HEIGHT;
	
	for(int i = 0; i < numPix; i++) {
		if(fgPix[i]<=bgPix[i]+backgroundHysteresis) {
			fgPix[i] = 0;
		}
	}
	
	rangeScaledImg.setFromPixels(fgPix, VISION_WIDTH, VISION_HEIGHT);
	
	
	maskedImg = rangeScaledImg;
	
	CvPoint points[NUM_MASK_POINTS];
	for(int i = 0; i < NUM_MASK_POINTS; i++) {
		points[i] = cvPoint(mask[i].x, mask[i].y);
	}
	
	
	CvPoint fill[4];
	fill[0] = cvPoint(0, 0);
	fill[1] = cvPoint(VISION_WIDTH, 0);
	fill[2] = cvPoint(VISION_WIDTH, VISION_HEIGHT);
	fill[3] = cvPoint(0, VISION_HEIGHT);
	
	CvPoint *allPoints[2];
	allPoints[0] = points;
	allPoints[1] = fill;
	
	int numPoints[2];
	numPoints[0] = NUM_MASK_POINTS;
	numPoints[1] = 4;
	
	// mask out the bit we're interested in
	cvFillPoly(
			   maskedImg.getCvImage(), allPoints,
			   numPoints,
			   2, cvScalar(0)
			   );
	maskedImg.flagImageChanged();
	
	contourFinder.findContours(maskedImg, minBlobSize*minBlobSize, maxBlobSize*maxBlobSize, 20, false);
	
	
	// compare each blob against the others and remove any blobs that are too close to each other
	if(blobs.size()>0) {
		for(int i = 0; i < contourFinder.blobs.size(); i++) {
			for(int j = i+1; j < contourFinder.blobs.size(); j++) {
				float dist = tricks::math::getClosestDistanceBetweenTwoContours(
																				contourFinder.blobs[i].pts, 
																				contourFinder.blobs[j].pts, 3);
				
				// find which one is closest to any other blob and delete the other one
				if(dist<20) {
					float distToI = FLT_MAX;
					float distToJ = FLT_MAX;
					for(map<int,Blob>::iterator it = blobs.begin(); it!=blobs.end(); it++) {
						
						distToI = MIN(distToI, (*it).second.distanceSquared(ofVec2f(contourFinder.blobs[i].centroid)));
						distToJ = MIN(distToJ, (*it).second.distanceSquared(ofVec2f(contourFinder.blobs[j].centroid)));
					}
					if(distToI<distToJ) {
						// merge the jth into the ith, and delete the jth one
						mergeBlobIntoBlob(contourFinder.blobs[i], contourFinder.blobs[j]);
						contourFinder.blobs.erase(contourFinder.blobs.begin() + j);
						j--;
					} else {
						
						// merge the ith into the jth, and delete the ith one
						mergeBlobIntoBlob(contourFinder.blobs[j], contourFinder.blobs[i]);
						contourFinder.blobs.erase(contourFinder.blobs.begin() + i);
						i--;
						break;
					}
				}
			}
		}
		
	}
	blobTracker.trackBlobs(contourFinder.blobs);
	
	
}
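The masking step above exploits how cvFillPoly handles overlapping contours: drawing the full-frame rectangle together with the mask polygon in a single call leaves the doubly-covered region (the mask interior) untouched and paints everything outside it. A standalone sketch of the same trick (hypothetical 8-bit single-channel 320x240 image img and a rectangular ROI):

// Zero out everything except a polygonal region of interest.
CvPoint roi[4]  = { {60, 40}, {260, 40}, {260, 200}, {60, 200} };
CvPoint full[4] = { {0, 0}, {320, 0}, {320, 240}, {0, 240} };

CvPoint* polys[2] = { roi, full };
int npts[2] = { 4, 4 };

// The area covered by both contours (the ROI interior) is treated as a
// hole and left alone, while the rest of the frame is painted to 0.
cvFillPoly( img, polys, npts, 2, cvScalar(0) );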
Example 18
//Window procedure for the sample-grabbing mouse handler; generates positive and negative samples
void CMyImage::on_mouseProc( int event, int x, int y, int flags, void* param)
{
	
	myParam *myparam = (myParam*)param;
	//left-button click
	if(myparam->drawFlag==false && myparam->finishFlag == false && event==CV_EVENT_LBUTTONDOWN)
	{
		myparam->drawFlag = true;
	}
	
	if(myparam->drawFlag==true && myparam->finishFlag == false && event==CV_EVENT_MOUSEMOVE)
	{
		// clone the current frame for preview drawing (the original created an
		// image and immediately overwrote the pointer with a clone, leaking it)
		IplImage *tempImage = cvCloneImage(myparam->img);

		cvRectangle(tempImage,cvPoint(x,y),cvPoint(x+40,y+40),cvScalar(255,255,255),1);
		cvShowImage("Image",tempImage);
		cvReleaseImage(&tempImage); // cvShowImage copies the pixels, so releasing here is safe
	}

	//right-button click: generate the samples
	if(myparam->drawFlag==true && myparam->finishFlag == false && event==CV_EVENT_RBUTTONDOWN)
	{
		CFileStatus   rStatus;
		CString dirPath = "C:\\OpenCVTrain\\Template";
		if(!CFile::GetStatus(dirPath,rStatus)) //directory does not exist
		{
			mkdir(dirPath);
		}

		// take a 40x40 view into the source image (no copy, no extra allocation;
		// the original snippet leaked the cvCreateMat/cvCreateImage buffers)
		CvMat sub;
		cvGetSubRect(myparam->img, &sub, cvRect(x,y,40,40));
		IplImage hdr;
		IplImage *dst1 = cvGetImage(&sub, &hdr);

		CString filePath;
		CString fileName;
		fileName.Format("%d%d%s.jpg",x,y,CTime::GetCurrentTime().Format("%Y%m%d%H%M%S")); //save the positive sample
		filePath = dirPath + "\\" + fileName;
		cvSaveImage(filePath,dst1);

		// fill the target region with white, then save the whole image as a background (negative sample)
		CvPoint  curve1[]={{x,y}, {x+40,y}, {x+42,y+40}, {x,y+40}};
		CvPoint* curveArr[1]={curve1};
		int  nCurvePts[1]={4};
		int  nCurves=1;

		cvFillPoly(myparam->img,curveArr,nCurvePts,nCurves,cvScalar(255,255,255));


		CString diroffPath = "C:\\OpenCVTrain\\offTemplate";
		if(!CFile::GetStatus(diroffPath,rStatus)) //directory does not exist
		{
			mkdir(diroffPath);
		}

		CString bgFilePath;
		CString bgFileName;
		bgFileName.Format("%d%d%s.jpg",x,y,CTime::GetCurrentTime().Format("%Y%m%d%H%M%S")); //save the negative sample
		bgFilePath = diroffPath + "\\" + bgFileName; // was fileName, which reused the positive sample's name
		cvSaveImage(bgFilePath,myparam->img);

		myparam->drawFlag = false;
		myparam->finishFlag = true;

	}


}
Example 19
void CV_PyrSegmentationTest::run( int /*start_from*/ )
{
    const int level = 5;
    const double range = 20;

    int code = CvTS::OK;

    CvPoint _cp[] ={{33,33}, {43,33}, {43,43}, {33,43}};
    CvPoint _cp2[] ={{50,50}, {70,50}, {70,70}, {50,70}};
    CvPoint* cp = _cp;
    CvPoint* cp2 = _cp2;
    CvConnectedComp *dst_comp[3];
    CvRect rect[3] = {{50,50,21,21}, {0,0,128,128}, {33,33,11,11}};
    double a[3] = {441.0, 15822.0, 121.0};

/*    ippiPoint cp3[] ={130,130, 150,130, 150,150, 130,150};  */
/*	CvPoint cp[] ={0,0, 5,5, 5,0, 10,5, 10,0, 15,5, 15,0};  */
    int nPoints = 4;
    int block_size = 1000;

    CvMemStorage *storage;   /*   storage for connected component writing  */
    CvSeq *comp;

    CvRNG* rng = ts->get_rng();
    int i, j, iter;

    IplImage *image, *image_f, *image_s;
    CvSize size = {128, 128};
    const int threshold1 = 50, threshold2 = 50;

    rect[1].width = size.width;
    rect[1].height = size.height;
    a[1] = size.width*size.height - a[0] - a[2];

    OPENCV_CALL( storage = cvCreateMemStorage( block_size ) );

    for( iter = 0; iter < 2; iter++ )
    {
        int channels = iter == 0 ? 1 : 3;
        int mask[] = {0,0,0};

        image = cvCreateImage(size, 8, channels );
        image_s = cvCloneImage( image );
        image_f = cvCloneImage( image );

        if( channels == 1 )
        {
            int color1 = 30, color2 = 110, color3 = 180;

            cvSet( image, cvScalarAll(color1));
            cvFillPoly( image, &cp, &nPoints, 1, cvScalar(color2));
            cvFillPoly( image, &cp2, &nPoints, 1, cvScalar(color3));
        }
        else
        {
            CvScalar color1 = CV_RGB(30,30,30), color2 = CV_RGB(255,0,0), color3 = CV_RGB(0,255,0);

            assert( channels == 3 );
            cvSet( image, color1 );
            cvFillPoly( image, &cp, &nPoints, 1, color2);
            cvFillPoly( image, &cp2, &nPoints, 1, color3);
        }

        cvRandArr( rng, image_f, CV_RAND_UNI, cvScalarAll(0), cvScalarAll(range*2) );
        cvAddWeighted( image, 1, image_f, 1, -range, image_f );

        cvPyrSegmentation( image_f, image_s,
                           storage, &comp,
                           level, threshold1, threshold2 );

        if(comp->total != 3)
        {
            ts->printf( CvTS::LOG,
                "The segmentation function returned %d (not 3) components\n", comp->total );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        /*  read the connected components     */
        dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
        dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
        dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );

        /*{
            for( i = 0; i < 3; i++ )
            {
                CvRect r = dst_comp[i]->rect;
                cvRectangle( image_s, cvPoint(r.x,r.y), cvPoint(r.x+r.width,r.y+r.height),
                    CV_RGB(255,255,255), 3, 8, 0 );
            }

            cvNamedWindow( "test", 1 );
            cvShowImage( "test", image_s );
            cvWaitKey(0);
        }*/

        code = cvTsCmpEps2( ts, image, image_s, 10, false, "the output image" );
        if( code < 0 )
            goto _exit_;

        for( i = 0; i < 3; i++)
        {
            for( j = 0; j < 3; j++ )
            {
                if( !mask[j] && dst_comp[i]->area == a[j] &&
                    dst_comp[i]->rect.x == rect[j].x &&
                    dst_comp[i]->rect.y == rect[j].y &&
                    dst_comp[i]->rect.width == rect[j].width &&
                    dst_comp[i]->rect.height == rect[j].height )
                {
                    mask[j] = 1;
                    break;
                }
            }
            if( j == 3 )
            {
                ts->printf( CvTS::LOG, "The component #%d is incorrect\n", i );
                code = CvTS::FAIL_BAD_ACCURACY;
                goto _exit_;
            }
        }

        cvReleaseImage(&image_f);
        cvReleaseImage(&image);
        cvReleaseImage(&image_s);
    }

_exit_:

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&image_f);
    cvReleaseImage(&image);
    cvReleaseImage(&image_s);

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Example 20
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;

    CvMemStorage *mallet_storage;
	CvSeq *mallet_circles = 0;
	float *mallet_p;
	int mi;

    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;

	int npts[2] = { 4, 12 };
	CvPoint **pts;

	pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2);
	pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4);
	pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12);
	pts[0][0] = cvPoint(0,0);
	pts[0][1] = cvPoint(160,0);
	pts[0][2] = cvPoint(320,240);
	pts[0][3] = cvPoint(0,240);
	pts[1][0] = cvPoint(39,17);
	pts[1][1] = cvPoint(126,15);
	pts[1][2] = cvPoint(147,26);
	pts[1][3] = cvPoint(160,77);
	pts[1][4] = cvPoint(160,164);
	pts[1][5] = cvPoint(145,224);
	pts[1][6] = cvPoint(125,233);
	pts[1][7] = cvPoint(39,233);
	pts[1][8] = cvPoint(15,217);
	pts[1][9] = cvPoint(0,133);
	pts[1][10] = cvPoint(0,115);
	pts[1][11] = cvPoint(17,28);

	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;

    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);

	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// capture size settings
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	//fps settings
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 107;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 115;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 255;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);

	// declare the image file pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);

	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	//flip vertically and horizontally; in the production setup it is mat_frame1 that gets flipped
	cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the combined image
	//image dilation and erosion
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the combined image

	//convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	//image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	//determine the rink frame coordinates
	//upper-left,  wall side: upper_left_f
	//upper-left,  goal side: upper_left_g
	//upper-right, wall side: upper_right_f
	//upper-right, goal side: upper_right_g
	//lower-left,  wall side: lower_left_f
	//lower-left,  goal side: lower_left_g
	//lower-right, wall side: lower_right_f
	//lower-right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4, 0.4, 0, 1);
	bool is_pushed_decision_button = true; //to be replaced by the signal from the other Raspberry Pi

	while(1){
		//start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			//Flip vertically and horizontally. In production, mat_frame1 is the one to flip.
			cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //apply contrast/brightness to the combined image
			//Convert the brightness-adjusted result back (Mat -> IplImage*) and hand it over, then release.
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();

			//mask out the two regions outside the table with black (pts/npts are prepared earlier in the program)
			cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0));

			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
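			//cvSetImageCOI selects a channel of interest so that cvMoments operates on
			//channel 1 of each extraction result (cvMoments requires a COI on multi-channel images)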
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			//coordinate computation
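			//centroid of each extracted blob from raw moments: x = m10/m00, y = m01/m00
			//(m00 is the blob area; if the color extraction found nothing, m00 is 0 and the division yields NaN)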
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; //target rotation direction: clockwise = 1, counterclockwise = 0
			//draw the circles at 1/10 of the camera frame height
			cvCircle(show_img, cvPoint((int)gX_before, (int)gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvCircle(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvLine(show_img, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);
			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_distanceY = CAM_PIX_2HEIGHT - 30; //keep the Y coordinate fixed: this is the defense line
			//The puck travels in a straight line, so a linear function predicts its subsequent trajectory.
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX = 0; //initialized here; computed below before use
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			double y_line = (upper_left_f.y + lower_right_f.y)/3;
			double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2;
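			//A minimal sketch, added here as an assumption (the original declares a_inclination,
			//b_intercept and target_coordinateX but never assigns them): predict where the puck
			//crosses the defense line from the two sampled centroids, then clamp to the frame.
			if (m00_before > 0 && m00_after > 0 && fabs(gY_after - gY_before) > 1e-6) {
				a_inclination = (gX_after - gX_before) / (gY_after - gY_before); //dx/dy
				b_intercept = gX_after - a_inclination * gY_after;               //x at y = 0
				target_coordinateX = (int)(a_inclination * target_distanceY + b_intercept);
			} else {
				a_inclination = 0.0;
				b_intercept = 0.0;
				target_coordinateX = (int)waiting_position; //no usable motion: hold the waiting position
			}
			if (target_coordinateX < left_frame)  target_coordinateX = left_frame;
			if (target_coordinateX > right_frame) target_coordinateX = right_frame;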

			if(gY_after - gY_before < -1){ //puck moving toward the human side: return to the waiting position
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 600);
				target_coordinateX = waiting_position;
				if(waiting_position + 5 < gX_now_mallet){
					target_direction = 0; //counterclockwise
				}
				else if(gX_now_mallet < waiting_position - 5){
					target_direction = 1; //clockwise
				}
			}
			/*else if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 0; //counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 1; //clockwise
			}*/
			else if(y_line < gY_after && y_line > gY_before){ //the puck has just crossed y_line moving toward the robot: strike
				clock_t start = clock();
				//wait time proportional to the distance the mallet must travel
				//(assumed unit: raw clock ticks, as in the original formula)
				clock_t end = start + (clock_t)(0.5 * (target_coordinateX - robot_goal_left.x) / 10);
				target_direction = 1;
				gpioPWM(25, 128);
				gpioWrite(18, target_direction);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				while(clock() < end); //busy-wait until the deadline
			}
			else{
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
			}
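			//pigpio semantics: gpioPWM(pin, 128) is ~50% duty on the default 0-255 range,
			//and gpioSetPWMfrequency returns the frequency actually set (the closest supported value).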



			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			//draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_distanceY), cvPoint(0, target_distanceY), cvScalar(255,255,0), 2);
			//draw the mallet's planned movement
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_distanceY), cvScalar(0,0,255), 2);

			/*

			int amount_movement = target_coordinateX - gX_now_mallet;

			//a limit switch has tripped: set target_direction accordingly
//			if(gpioRead(6) == 1){ //X axis, right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0; //counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){ //X axis, left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; //clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){ //Y axis, upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){ //Y axis, lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){ //Y axis, lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){ //Y axis, upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				//keep the Y axis fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1; //clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0; //counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			//display the images in the designated windows
			//cvShowImage("Previous Image", img_all_round2);
			cvShowImage("Now Image", show_img);
			cvShowImage("pack", dst_img_pack);
			cvShowImage("mallet", dst_img_mallet);
			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ //when a reset signal arrives
			is_pushed_decision_button = 0;
		}
    }

    gpioTerminate();

    cvDestroyAllWindows();

	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);
	cvFree(&pts[0]);
	cvFree(&pts[1]);
	cvFree(&pts);

    return 0;
}