Code example #1
File: main.c Project: ntavish/tri
void findContours( IplImage* img, CvMemStorage* storage, CvSeq **contours)
{
    // temporary images for the cvFindContours call
    IplImage* timg  = NULL;
    IplImage* gray  = NULL;
    IplImage* tgray = NULL;

    CvSize sz = cvSize( img->width, img->height );

    // allocate single-channel work buffers of the same size and depth
    gray  = cvCreateImage( sz, img->depth, 1 );
    timg  = cvCreateImage( sz, img->depth, 1 );
    tgray = cvCreateImage( sz, img->depth, 1 );

    // extract the first channel of the input via the COI
    cvSetImageCOI( img, 1 );
    cvCopy( img, timg, NULL );
    cvSetImageCOI( img, 0 );

    cvCopy( timg, tgray, 0 );

    // ct1 and ct2 are the Canny thresholds, defined elsewhere in the project
    cvCanny( tgray, gray, ct1, ct2, 5 );
    // dilate the Canny output to close holes between edge segments
    cvDilate( gray, gray, 0, 2 );

    cvFindContours( gray, storage, contours,
                    sizeof(CvContour), CV_RETR_LIST,
                    CV_CHAIN_APPROX_NONE, cvPoint(0,0) );

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );
}
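A minimal caller sketch for the function above (illustrative only; img is assumed to be an already-loaded IplImage*, and ct1/ct2 are the project's Canny thresholds):

/* Hypothetical caller for findContours() above. */
CvMemStorage *storage = cvCreateMemStorage(0);
CvSeq *contours = NULL;
findContours(img, storage, &contours);
/* CV_RETR_LIST links all contours through h_next */
for (CvSeq *c = contours; c != NULL; c = c->h_next)
    printf("contour with %d points\n", c->total);
cvReleaseMemStorage(&storage);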
Code example #2
File: rgb2hsi.c Project: sumitsrv/vk
void hsi2(IplImage *img)
{
	IplImage *hsv = cvCloneImage(img);
	IplImage *h = cvCreateImage(cvSize(hsv->width, hsv->height), hsv->depth, 1);
	IplImage *s = cvCreateImage(cvSize(hsv->width, hsv->height), hsv->depth, 1);
	cvCvtColor(img, hsv, CV_BGR2HSV);
	//cvShowImage("rgb",img);

	// extract, threshold, and write back the H channel via the COI
	cvSetImageCOI(hsv, 1);
	cvCopy(hsv, h, 0);
	cvThreshold( h, h, 40, 255, CV_THRESH_BINARY_INV );
	cvCopy(h, hsv, 0);

	// extract, threshold, and write back the S channel
	cvSetImageCOI(hsv, 2);
	cvCopy(hsv, s, 0);
	cvThreshold( s, s, 35, 255, CV_THRESH_BINARY );
	cvCopy(s, hsv, 0);

	// reset the COI before converting the whole image back
	cvSetImageCOI(hsv, 0);
	cvCvtColor(hsv, img, CV_HSV2BGR);
	cvShowImage("img", img);
	cvReleaseImage(&hsv);
	cvReleaseImage(&s);
	cvReleaseImage(&h);
}
Code example #3
File: motion_detect.c Project: sumitsrv/vk
void x(IplImage *img1, IplImage *img2, IplImage *imgsize)
{
	IplImage *imggray1;
	IplImage *imggray2;
	IplImage *imggray3;

	// single-channel buffers for the two hue planes and their difference
	imggray1 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);
	imggray2 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);
	imggray3 = cvCreateImage( cvSize( imgsize->width, imgsize->height ), IPL_DEPTH_8U, 1);

	IplImage *hsv1 = cvCloneImage(img1);
	IplImage *hsv2 = cvCloneImage(img2);

	// extract the hue plane of each frame via the COI
	cvCvtColor(img1, hsv1, CV_BGR2HSV);
	cvSetImageCOI(hsv1, 1);
	cvCopy(hsv1, imggray1, 0);

	cvCvtColor(img2, hsv2, CV_BGR2HSV);
	cvSetImageCOI(hsv2, 1);
	cvCopy(hsv2, imggray2, 0);

	// compute the per-pixel difference between the two hue planes
	cvAbsDiff( imggray1, imggray2, imggray3 );
	cvShowImage( "video", imggray3 );
	cvReleaseImage(&imggray1);
	cvReleaseImage(&imggray2);
	cvReleaseImage(&imggray3);
	cvReleaseImage(&hsv1);
	cvReleaseImage(&hsv2);
}
Code example #4
File: camera_self_filter.cpp Project: TTgogogo/siml
void alpha_image_cb(const sensor_msgs::ImageConstPtr& msg_ptr){

    calc_and_publish_BWMask(msg_ptr->header.stamp, msg_ptr->header.frame_id);
    IplImage* cam_image = bridge.imgMsgToCv(msg_ptr);
    IplImage* cam_alpha_image = cvCreateImage(cvGetSize(cam_image), IPL_DEPTH_8U, 4);

    // blue
    cvSetImageCOI(cam_alpha_image, 1);
    cvSetImageCOI(cam_image, 1);
    cvCopy(cam_image, cam_alpha_image);

    // green
    cvSetImageCOI(cam_alpha_image, 2);
    cvSetImageCOI(cam_image, 2);
    cvCopy(cam_image, cam_alpha_image);

    // red
    cvSetImageCOI(cam_alpha_image, 3);
    cvSetImageCOI(cam_image, 3);
    cvCopy(cam_image, cam_alpha_image);

    // alpha: fill the fourth channel from the black-and-white mask
    cvSetImageCOI(cam_alpha_image, 4);
    cvCopy(ipl_maskBW, cam_alpha_image);
    cvSetImageCOI(cam_alpha_image, 0);

    sensor_msgs::ImagePtr img_msg = sensor_msgs::CvBridge::cvToImgMsg(cam_alpha_image);
    img_msg->header = msg_ptr->header;
    image_publisher.publish(img_msg);
    cvReleaseImage(&cam_alpha_image);
}
Code example #5
File: iplimage.cpp Project: thenoseman/ruby-opencv
/*
 * call-seq:
 *   set_coi(<i>coi</i>)
 *   set_coi(<i>coi</i>){|image| ...}
 *
 * Set the channel of interest (COI). <i>coi</i> should be a Fixnum.
 * When a block is given, the COI is restored to its previous value
 * after the block returns. Returns self.
 */
VALUE
rb_set_coi(VALUE self, VALUE coi)
{
  VALUE block = rb_block_given_p() ? rb_block_proc() : 0;
  if (block) {
    int prev_coi = cvGetImageCOI(IPLIMAGE(self));
    cvSetImageCOI(IPLIMAGE(self), FIX2INT(coi));
    rb_yield_values(1, self);
    cvSetImageCOI(IPLIMAGE(self), prev_coi);
  } else {
    cvSetImageCOI(IPLIMAGE(self), FIX2INT(coi));
  }
  return self;
}
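For comparison, the same save-and-restore pattern in plain C (a minimal sketch, not part of the binding; "op" stands for any COI-aware OpenCV call):

#include <opencv/cxcore.h>

/* Run a COI-aware operation on one channel of img, then restore the
   previous COI -- the pattern rb_set_coi implements above. */
static void with_coi(IplImage *img, int coi, void (*op)(CvArr *))
{
    int prev_coi = cvGetImageCOI(img);  /* save the caller's COI */
    cvSetImageCOI(img, coi);            /* select the channel of interest */
    op(img);                            /* operates on that channel only */
    cvSetImageCOI(img, prev_coi);       /* restore */
}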
Code example #6
File: metric.cpp Project: manasabhat/ITG
float brightness(IplImage* image)
{
    IplImage* temp;
    temp = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 3);
    cvCvtColor( image, temp, CV_RGB2YCrCb );
    // average only the Y (luma) channel via the COI
    cvSetImageCOI(temp, 1);
    CvScalar scal = cvAvg( temp );
    float metric = (float)scal.val[0] / 255;
    cvReleaseImage(&temp);
    return metric;
}
Code example #7
int __stdcall MatchCheckCode(char *szPath, char *szCode, int iThreadShod)
{
	try
	{
		IplImage *m_Source_Pic = NULL;
		m_Source_Pic = cvLoadImage(szPath);
		if (!m_Source_Pic)
		{
			strcpy(szCode,"can not open the image");
			return -2;	// image not found
		}
		IplImage * RedChannel   = cvCreateImage( cvGetSize(m_Source_Pic), 8, 1);
		IplImage * GreenChannel = cvCreateImage( cvGetSize(m_Source_Pic), 8, 1);
		//IplImage * BlueChannel = cvCreateImage( cvGetSize(m_Source_Pic), 8, 1);
		//cvSetImageCOI(m_Source_Pic,1);
		//cvCopy(m_Source_Pic,BlueChannel); // extract the blue channel
		cvSetImageCOI(m_Source_Pic,2);
		cvCopy(m_Source_Pic,GreenChannel);  // extract the green channel
		cvSetImageCOI(m_Source_Pic,3);
		cvCopy(m_Source_Pic,RedChannel); // extract the red channel
		cvAdd(RedChannel,GreenChannel,RedChannel);
		cvThreshold(RedChannel,RedChannel,iThreadShod,255,CV_THRESH_BINARY);
		strcat(szPath,"2.bmp");
		cvSaveImage(szPath,RedChannel);
		char *szCodeDll = OCR(szPath,-1);
		CString strRet = GetRetString(szCodeDll);
		strcpy(szCode,strRet);
		cvReleaseImage(&m_Source_Pic);
		cvReleaseImage(&RedChannel);
		cvReleaseImage(&GreenChannel);
		//cvReleaseImage(&BlueChannel);
		return 0;
	}
	catch (...)
	{
		strcpy(szCode,"exception");
	}
	return -1;
}
Code example #8
IplImage *THISCLASS::SwitchChannels(IplImage *src, char *order) {
	// Create a new image of the same size
	IplImage* dest = cvCreateImage(cvGetSize(src), src->depth, 3);

	// Copy each channel to its desired position
	for (int srci = 0; srci < 3; srci++) {
		for (int desti = 0; desti < 3; desti++) {
			if (order[desti] == src->channelSeq[srci]) {
				dest->channelSeq[desti] = order[desti];

				cvSetImageCOI(src, srci + 1);
				cvSetImageCOI(dest, desti + 1);
				cvCopy(src, dest);
				break;
			}
		}
	}

	// Reset the COI
	cvSetImageCOI(src, 0);
	cvSetImageCOI(dest, 0);
	return dest;
}
Code example #9
File: image_utils.cpp Project: ulyssesrr/carmen_lcad
void image_utils_equalize(IplImage *src, IplImage *dst)
{
  IplImage *channels[3];
  int c;

  cvCopy(src, dst, NULL);

  for (c = 0; c < 3; c++)
  {
    channels[c] = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvSetImageCOI(src, c + 1);
    cvCopy(src, channels[c], NULL);
    cvSetImageCOI(src, 0);
    cvEqualizeHist(channels[c], channels[c]);
  }

  cvMerge(channels[0], channels[1], channels[2], NULL, dst);

  for (c = 0; c < 3; c++)
  {
    cvReleaseImage(&channels[c]);
  }
}
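A minimal caller sketch for the routine above (illustrative only; the file names are placeholders):

/* Hypothetical caller for image_utils_equalize(). */
IplImage *src = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
image_utils_equalize(src, dst);
cvSaveImage("equalized.png", dst);
cvReleaseImage(&src);
cvReleaseImage(&dst);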
Code example #10
File: SixFringeCapture.cpp Project: karpinsn/SLS
bool SixFringeCapture::newImage(IplImage* image)
{
  bool needRedraw = false;

  cvSetImageCOI(m_fringeLoadingImage.get(), (m_currentChannelLoad + 1));
  cvCopy(image, m_fringeLoadingImage.get());
  cvSetImageCOI(m_fringeLoadingImage.get(), 0);

  m_currentChannelLoad++;

  if(m_currentChannelLoad == 3)
  {
    int backBufferIndex = (m_frontBufferIndex + 1) % 2;
    m_fringeImages[backBufferIndex][m_currentFringeLoad]->transferToTexture(m_fringeLoadingImage.get());
    m_currentChannelLoad = 0;
    m_currentFringeLoad++;
  }

  if(m_currentFringeLoad == 2)
  {
    //  Capture first 6 images as reference plane
    if(!m_haveReferencePhase)
      { m_captureReferencePhase = true; }

    m_currentChannelLoad = 0;
    m_currentFringeLoad = 0;
    swapFringeBuffers();
    m_3dpsCalculator.frameUpdate();
    needRedraw = true;
  }

  // Make sure we don't have any errors
  OGLStatus::logOGLErrors("SixFringeCapture - setBackHoloBuffer()");

  return needRedraw;
}
Code example #11
File: fit.c Project: neldredge/horizonator
static CvMat* extractEdges( IplImage* img, image_type_t source )
{
  CvMat* temp_16sc;
  CvMat* temp_8uc;
  CvMat* edges;
  edges     = cvCreateMat( img->height, img->width, CV_8UC1 );
  temp_16sc = cvCreateMat( img->height, img->width, CV_16SC1 );
  temp_8uc  = cvCreateMat( img->height, img->width, CV_8UC1 );

  cvZero(edges);


  // nested helper (a GCC extension): accumulates Laplacian edge strength from one channel into 'edges'
  void accumulateEdgesFromChannel( int channel )
  {
    if( channel > 0 )
      cvSetImageCOI( img, channel );

    cvCopy(img, temp_8uc, NULL); // needed only because cvLaplace() doesn't support COI

    if( source == PHOTO )
      cvSmooth(temp_8uc, temp_8uc, CV_GAUSSIAN, PHOTO_PRESMOOTH, PHOTO_PRESMOOTH, 0.0, 0.0);

    cvLaplace(temp_8uc, temp_16sc, 3);
    cvAbs( temp_16sc, temp_16sc );
    cvAdd( edges, temp_16sc, edges, NULL);
  }


  if( img->nChannels == 1 )
    accumulateEdgesFromChannel( -1 );
  else if( source != PHOTO )
    accumulateEdgesFromChannel( 1 );
  else
    for(int i = 0; i < 3; i++)
      accumulateEdgesFromChannel( i+1 );


  cvReleaseMat(&temp_16sc);
  cvReleaseMat(&temp_8uc);
  return edges;
}
Code example #12
File: VideoProcessor.cpp Project: Buza/CVOCV--iOS-
/*!
 * @function canny
 * @discussion Canny edge detection.
 * @updated 2011-4-13
 */
char* canny(IplImage *frameImage)
{
    //Select based on the capture dimensions.
    switch(captureSize)
    {
        case(SMALL_BACK):
        case(SMALL_FRONT):
            convertedImage = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 4);
            break;
        case(MEDIUM_BACK):
        case(LARGE_FRONT):
        case(MEDIUM_FRONT):
            convertedImage = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 4);
            break;
        case(LARGE_BACK):
            convertedImage = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 4);
            break;
    }

    CvSize sz = cvSize(frameImage->width & -2, frameImage->height & -2);
    IplImage* timg = cvCloneImage(frameImage);
    IplImage* gray = cvCreateImage(sz, IPL_DEPTH_8U, 1); 
    IplImage* tgray =  cvCreateImage(sz, IPL_DEPTH_8U, 1);
    
    cvSetImageCOI(frameImage, 1);
    
    cvCopy(frameImage, tgray, 0);
    cvCanny(tgray, gray, 0, 5, 5);
    
    cvDilate(gray, gray, 0, 1);
    
    cvCvtColor(gray, convertedImage, CV_GRAY2RGBA);  // convertedImage has 4 channels
    
    cvReleaseImage(&gray);
    cvReleaseImage(&tgray);
    cvReleaseImage(&timg);   
    
    return convertedImage->imageDataOrigin;
}
Code example #13
File: fullcalib.cpp Project: emblem/aril
bool photometric_calibration(CalibModel &model, CvCapture *capture, 
			     int nbImages, bool cache)
{

  if (cache) model.map.load();

  const char *win = "BazAR";

  IplImage *gray = 0;

  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
  cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE);

  IplImage *frame = 0;
  IplImage *display = cvCloneImage(cvQueryFrame(capture));

  int nbHomography = 0;
  LightCollector lc(model.map.reflc);
  IplImage *lightmap = cvCreateImage(cvGetSize(model.map.map.getIm()), IPL_DEPTH_8U,
				     lc.avgChannels);
  while (1)
    {
      // acquire image
      frame = cvQueryFrame( capture );
      /*
	if (frame) cvReleaseImage(&frame);
	frame = cvLoadImage("model.bmp",1);
      */
      if( !frame )
	break;

      // convert it to gray levels, if required
      if (frame->nChannels >1) {
	if( !gray ) 
	  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
	cvCvtColor(frame, gray, CV_RGB2GRAY);
      } else {
	gray = frame;
      }

      // run the detector
      if (model.detector.detect(gray)) {
	// 2d homography found
	nbHomography++;

	// Computes 3D pose and surface normal
	model.augm.Clear();
	add_detected_homography(model.detector, model.augm);
	model.augm.Accomodate(4, 1e-4);
	CvMat *mat = model.augm.GetObjectToWorld();
	float normal[3];
	for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
	cvReleaseMat(&mat);

	// average pixels over triangles
	lc.averageImage(frame,model.detector.H);

	// add observations
	if (!model.map.isReady())
	  model.map.addNormal(normal, lc, 0);

	if (!model.map.isReady() && nbHomography >= nbImages) {
	  if (model.map.computeLightParams()) {
	    model.map.save();
	    const float *gain = model.map.getGain(0);
	    const float *bias = model.map.getBias(0);
	    cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl;
	    cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl;
	  }
	} 
      } 
		
      if (model.map.isReady()) {
	double min, max;
	IplImage *map = model.map.map.getIm();
	cvSetImageCOI(map, 2);
	cvMinMaxLoc(map, &min, &max);
	cvSetImageCOI(map, 0);
	assert(map->nChannels == lightmap->nChannels);
	cvConvertScale(map, lightmap, 128, 0);
	cvShowImage("LightMap", lightmap);
	augment_scene(model, frame, display);
      } else {
	cvCopy(frame,display);
	if (model.detector.object_is_detected)
	  lc.drawGrid(display, model.detector.H);
      }

      cvShowImage(win, display);

      int k=cvWaitKey(10);
      if (k=='q' || k== 27)
	break;
    }

  cvReleaseImage(&lightmap);
  cvReleaseImage(&display);
  if (gray != frame)  // release only if we allocated it; frame may be NULL here
    cvReleaseImage(&gray);
  return 0;
}
Code example #14
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( result->total == 4 &&
                    cvContourArea(result,CV_WHOLE_SEQ,0) > 500 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;

                    for( i = 0; i < 5; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                            (CvPoint*)cvGetSeqElem( result, i ),
                            (CvPoint*)cvGetSeqElem( result, i-2 ),
                            (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle
                    // vertices to resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares,
                                (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return squares;
}
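A minimal caller sketch for the function above (illustrative only; img and storage are assumed to exist, and drawing is left out):

/* Each detected square contributes four consecutive CvPoint entries. */
CvSeq *squares = findSquares4( img, storage );
for( int i = 0; i < squares->total; i += 4 )
{
    CvPoint *p0 = (CvPoint*)cvGetSeqElem( squares, i );
    CvPoint *p1 = (CvPoint*)cvGetSeqElem( squares, i + 1 );
    CvPoint *p2 = (CvPoint*)cvGetSeqElem( squares, i + 2 );
    CvPoint *p3 = (CvPoint*)cvGetSeqElem( squares, i + 3 );
    /* ... draw or inspect the quadrangle p0-p1-p2-p3 ... */
}
cvClearMemStorage( storage );  /* the contours and squares both live in storage */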
Code example #15
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;
    
    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;
	
	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;
	
    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);
 
	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// size settings
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	//fps setting
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows to display the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 106;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 135;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 105;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);
	
	// declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	
	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	// flip vertically and horizontally; in the production setup it is mat_frame1 that gets flipped
	cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the stitched image
	// image dilation and erosion
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the stitched image

	// convert the brightness-adjusted result (Mat -> IplImage*) and hand it over, then release
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 54, 77, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	// image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
//		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	// determine the frame coordinates
	// upper left,  wall side: upper_left_f
	// upper left,  goal side: upper_left_g
	// upper right, wall side: upper_right_f
	// upper right, goal side: upper_right_g
	// lower left,  wall side: lower_left_f
	// lower left,  goal side: lower_left_g
	// lower right, wall side: lower_right_f
	// lower right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1; // to be driven by the signal from the other Raspberry Pi
	
	while(1){
		// start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			// flip vertically and horizontally; in the production setup it is mat_frame1 that gets flipped
			cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the stitched image
			// convert the brightness-adjusted result (Mat -> IplImage*) and hand it over, then release
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();
	
			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			// coordinate calculation
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; // target direction: clockwise = 1, counterclockwise = 0
			// circles are drawn at 1/10 of the frame height
//			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
//			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; // keep the Y distance fixed: the defense line
			// the puck travels in a straight line, so use a linear function to predict its trajectory
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 0; // counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; // clockwise
			}
			else{
				//pwm output for rotate
				// allow a margin for table vibration
				if(abs(gX_after - gX_before) <= 1 && abs(gY_after - gY_before) <= 1){ // pause when the puck is not moving
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
					a_inclination = 0;
					b_intercept=0;
				}
				else{
					a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
					b_intercept = gY_after - a_inclination * gX_after;
					// compute the target X coordinate from the linear function
					if(a_inclination){
						target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
					}
					else{
						target_coordinateX = center_line;
					}

					origin_coordinateY = a_inclination * left_frame + b_intercept;

					int rebound_max = 5;
					int rebound_num = 0;

					while(target_coordinateX < left_frame || right_frame < target_coordinateX){
						if(target_coordinateX < left_frame){ // trajectory after a bounce off the left frame
							target_coordinateX = 2 * left_frame - target_coordinateX;
							b_intercept -= 2 * ((-a_inclination) * left_frame);
							a_inclination = -a_inclination;
							origin_coordinateY = a_inclination * left_frame + b_intercept;
							if(target_coordinateX < right_frame){
							}
							else{
								// Y coordinate where the path from the left frame hits the right frame
								target_coordinateY = a_inclination * right_frame + b_intercept;
							}
						}
						else if(right_frame < target_coordinateX){ // trajectory after a bounce off the right frame
							target_coordinateX = 2 * right_frame - target_coordinateX;
							b_intercept += 2 * (a_inclination * right_frame);
							a_inclination= -a_inclination;
							//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
							origin_coordinateY = a_inclination * right_frame + b_intercept;
							if(left_frame < target_coordinateX){
							}
							else{
								// Y coordinate where the path from the right frame hits the left frame
								target_coordinateY = a_inclination * left_frame + b_intercept;
							}
						}
						rebound_num++;
						if(rebound_max < rebound_num){
							// too many rebounds: fall back to the center
							target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
							break;
						}
					}
					if(target_coordinateX < center_line && center_line < gX_now_mallet){
						target_direction = 0;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else if(center_line < target_coordinateX && gX_now_mallet < center_line){
						target_direction = 1;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else{
						gpioPWM(25, 0);
						closest_frequency = gpioSetPWMfrequency(25, 0);
					}
				}
				printf("a_inclination: %f\n",a_inclination);
				printf("b_intercept: %f\n",b_intercept);
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}

			//pwm output for rotate
			//台の揺れを想定してマージンをとる
			/*if(abs(gX_after - gX_before) <= 1){ // pause when the puck is not moving
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
				a_inclination = 0;
				b_intercept=0;
			}
			else if(gY_after-1 < gY_before ){	// when the puck moves away, return to the center of the table
				a_inclination = 0;
				b_intercept=0;
				// the target is the center, computed from the four points on the robot side of the table
				double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
				if(center_line + 3 < gX_now_mallet){ // margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 0; // counterclockwise
				}
				else if(gX_now_mallet < center_line-3){ // margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 1; // clockwise
				}
				else{
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
				}
			}
			else{
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
				b_intercept = gY_after - a_inclination * gX_after;
				// compute the target X coordinate from the linear function
				if(a_inclination){
					target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
				}
				else{
					target_coordinateX = 0;
				}
			}

			printf("a_inclination: %f\n",a_inclination);
			printf("b_intercept: %f\n",b_intercept);

			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;
			origin_coordinateY = a_inclination * left_frame + b_intercept;
			if(target_coordinateX < left_frame){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(left_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else if(right_frame < target_coordinateX){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(right_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else{
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
			}

			int rebound_max = 5;
			int rebound_num = 0;

			while(target_coordinateX < left_frame || right_frame < target_coordinateX){
				if(target_coordinateX < left_frame){ // trajectory after a bounce off the left frame
					target_coordinateX = 2 * left_frame - target_coordinateX;
					b_intercept -= 2 * ((-a_inclination) * left_frame);
					a_inclination = -a_inclination;
					origin_coordinateY = a_inclination * left_frame + b_intercept;
					if(target_coordinateX < right_frame){
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						// Y coordinate where the path from the left frame hits the right frame
						target_coordinateY = a_inclination * right_frame + b_intercept;
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint(right_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				else if(right_frame < target_coordinateX){ // trajectory after a bounce off the right frame
					target_coordinateX = 2 * right_frame - target_coordinateX;
					b_intercept += 2 * (a_inclination * right_frame);
					a_inclination= -a_inclination;
					//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
					origin_coordinateY = a_inclination * right_frame + b_intercept;
					if(left_frame < target_coordinateX){
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						// Y coordinate where the path from the right frame hits the left frame
						target_coordinateY = a_inclination * left_frame + b_intercept;
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint(left_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				rebound_num++;
				if(rebound_max < rebound_num){
					// too many rebounds: fall back to the center
					target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
					break;
				}
			}

			printf("target_coordinateX: %d\n",target_coordinateX);
			// draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			// draw the mallet's motion
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
			//cvPutText (show_img, to_c_char((int)gX_now_mallet), cvPoint(460,30), &font, cvScalar(220,50,50));
			//cvPutText (show_img, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));

			int amount_movement = target_coordinateX - gX_now_mallet;

			// react to limit switches and rotate in target_direction
//			if(gpioRead(6) == 1){ // X-axis right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0; // counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){ // X-axis left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; // clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){ // Y-axis upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){ // Y-axis lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){ // Y-axis lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){ // Y-axis upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				// to keep the Y axis fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1; // clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0; // counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// display the images in their windows
			//cvShowImage("Previous Image", img_all_round2);
//			cvShowImage("Now Image", show_img);
//			cvShowImage("pack", dst_img_pack);
//			cvShowImage("mallet", dst_img_mallet);
//			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ // a reset signal arrived
			is_pushed_decision_button = 0;
		}
    }
    
    gpioTerminate();
    
    cvDestroyAllWindows();
	
	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images (frames from cvQueryFrame are owned by the capture and must not be released)
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);

    return 0;
}
Code example #16
int main(int argc, char* argv[]) {
    int counter;
    wchar_t auxstr[20];

    printf("start!\n");

    printf("PUCK MINH: %d\n",minH);
    printf("PUCK MAXH: %d\n",maxH);
    printf("PUCK MINS: %d\n",minS);
    printf("PUCK MAXS: %d\n",maxS);
    printf("PUCK MINV: %d\n",minV);
    printf("PUCK MAXV: %d\n",maxV);
    printf("ROBOT MINH: %d\n",RminH);
    printf("ROBOT MAXH: %d\n",RmaxH);
    printf("ROBOT MINS: %d\n",RminS);
    printf("ROBOT MAXS: %d\n",RmaxS);
    printf("ROBOT MINV: %d\n",RminV);
    printf("ROBOT MAXV: %d\n",RmaxV);
    printf("FPS: %d\n",fps);

    // input image file names
    char imgfile[] = "camera/photodir/capmallet1.png";
    char imgfile2[] = "camera/photodir/capmallet2.png";

    // load the images
    img = cvLoadImage(imgfile, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    img2 = cvLoadImage(imgfile2, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);

    // create windows to display the images
    cvNamedWindow("circle_sample", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("circle_sample2", CV_WINDOW_AUTOSIZE);

    //cvNamedWindow("cv_ColorExtraction");

    // Init font
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);

    IplImage* dst_img_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    //white extraction 0,255,0,15,240,255
    //black extraction 0, 255, 0, 50, 0, 100
    //blue detection 0, 255, 50, 200, 100, 180
    //cv_ColorExtraction(img, dst_img_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img, dst_img_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    cv_ColorExtraction(img2, dst_img2_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img2, dst_img2_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);

    //CvMoments moment_mallett;
    CvMoments moment_pack;
    CvMoments moment2_mallett;
    CvMoments moment2_pack;

    //cvSetImageCOI(dst_img_mallett, 1);
    cvSetImageCOI(dst_img_pack, 1);
    cvSetImageCOI(dst_img2_mallett, 1);
    cvSetImageCOI(dst_img2_pack, 1);

    //cvMoments(dst_img_mallett, &moment_mallett, 0);
    cvMoments(dst_img_pack, &moment_pack, 0);
    cvMoments(dst_img2_mallett, &moment2_mallett, 0);
    cvMoments(dst_img2_pack, &moment2_pack, 0);

    // coordinate calculation
    double m00_before = cvGetSpatialMoment(&moment_pack, 0, 0);
    double m10_before = cvGetSpatialMoment(&moment_pack, 1, 0);
    double m01_before = cvGetSpatialMoment(&moment_pack, 0, 1);
    double m00_after = cvGetSpatialMoment(&moment2_pack, 0, 0);
    double m10_after = cvGetSpatialMoment(&moment2_pack, 1, 0);
    double m01_after = cvGetSpatialMoment(&moment2_pack, 0, 1);
    double gX_before = m10_before/m00_before;
    double gY_before = m01_before/m00_before;
    double gX_after = m10_after/m00_after;
    double gY_after = m01_after/m00_after;
    double m00_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 0);
    double m10_mallett = cvGetSpatialMoment(&moment2_mallett, 1, 0);
    double m01_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 1);
    double gX_now_mallett = m10_mallett/m00_mallett;
    double gY_now_mallett = m01_mallett/m00_mallett;

    cvCircle(img2, cvPoint((int)gX_before, (int)gY_before), 50, CV_RGB(0,0,255), 6, 8, 0);

    cvLine(img2, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);

    int target_destanceY = 480 - 30; // keep the Y distance fixed: the defense line
    // the puck travels in a straight line, so use a linear function to predict its trajectory
    double a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
    double b_intercept = gY_after - a_inclination * gX_after;
    printf("gX_after: %f\n",gX_after);
    printf("gY_after: %f\n",gY_after);
    printf("gX_before: %f\n",gX_before);
    printf("gY_before: %f\n",gY_before);
    printf("a_inclination: %f\n",a_inclination);
    printf("b_intercept: %f\n",b_intercept);

    int target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
    printf("target_coordinateX: %d\n",target_coordinateX);
    cvLine(img2, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
    cvLine(img2, cvPoint(640, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
    cvLine(img2, cvPoint((int)gX_now_mallett, (int)gY_now_mallett), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
    cvPutText (img2, to_c_char((int)gX_now_mallett), cvPoint(460,30), &font, cvScalar(220,50,50));
    cvPutText (img2, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));
    // display the images in their windows
    cvShowImage("circle_sample", img);
    cvShowImage("circle_sample2", img2);

	//pwm output test
	if(gpioInitialise() < 0) return 1;
	gpioSetMode(18, PI_OUTPUT);
	gpioPWM(18, 128);
	gpioSetPWMfrequency(18, 1000);
	/*if(wiringPiSetupGpio()==-1){
		printf("cannotsetup gpio.\n" );
		return 1;
	}
	pinMode(18,PWM_OUTPUT);
	pwmSetMode(PWM_MODE_MS);
	pwmSetClock(64);
	pwmSetRange(100);
	pwmWrite(18,50);*/

    while(1) {
        if(cv::waitKey(30) >= 0) {
            break;
        }
    }

    //Clean up used images
    cvReleaseImage(&img);
//    cvReleaseImage (&dst_img);

    cvDestroyAllWindows() ;

    return 0;
}
Code example #17
File: cxcore.hpp Project: mrkhnstn/MotorsAndSensors
 void set_coi(int coi) { cvSetImageCOI(image,coi); }
Code example #18
void THISCLASS::OnStep() {
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	//Check the images
	if (! inputimage)
	{
		AddError(wxT("No input Image"));
		return;
	}
	if (inputimage->nChannels != 3)
	{
		AddError(wxT("Input image has not 3 channels."));
		return;
	}
	if (! mBackgroundImage)
	{
		AddError(wxT("Background image not accessible"));
		return;
	}
	if ((cvGetSize(inputimage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(inputimage).width != cvGetSize(mBackgroundImage).width))
	{
		AddError(wxT("Input and background images have not the same dimension"));
		return;
	}

	//Check the channel order of the input image (loaded images are BGR, the OpenCV default) and reorder the background channels accordingly
	if (strncmp(mCore->mDataStructureImageColor.mImage->channelSeq, mBackgroundImage->channelSeq, 3))
	{
		//Make a temporary clone of the image in 3 separate channels
		IplImage* tmpImage[3];
		for (int i = 0;i < 3;i++)
			tmpImage[i] = cvCreateImage(cvGetSize(mBackgroundImage), 8, 1);
		cvSplit(mBackgroundImage, tmpImage[0], tmpImage[1], tmpImage[2], NULL);
		CvScalar tmpBackgroundMean = mBackgroundImageMean;
		//Modify the sequence of the channels in the background
		for (int i = 0;i < 3;i++)
			//If the channel is not the same, search for the corresponding channel to copy, else copy the channel directly
			if (inputimage->channelSeq[i] != mBackgroundImage->channelSeq[i])
				for (int j = 0;j < 3;j++)
					if (inputimage->channelSeq[i] == mBackgroundImage->channelSeq[j])
					{
						cvSetImageCOI(mBackgroundImage, i + 1);
						cvCopy(tmpImage[j], mBackgroundImage);
						//Remove the COI
						cvSetImageCOI(mBackgroundImage, 0);
						mBackgroundImageMean.val[i] = tmpBackgroundMean.val[j];
					}
		strcpy(mBackgroundImage->channelSeq, inputimage->channelSeq);
		for (int i = 0; i < 3;i++)
			cvReleaseImage(&(tmpImage[i]));
	}

	try {
		// Correct the input image by the difference in image means
		if (mCorrectMean)
		{
			CvScalar tmpScalar = cvAvg(inputimage);
			cvAddS(inputimage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), inputimage);
		}

		// Background subtraction
		cvAbsDiff(inputimage, mBackgroundImage, inputimage);
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(inputimage);
	}
}
Code example #19
File: main.cpp Project: finalx/finalx-labs-all
int main(int argc, char** argv)
{
    IplImage *mOrigFrame = NULL;
    IplImage *resizedImg = NULL;
    IplImage *detectedImg = NULL;
    IplImage *faceNotFoundImg = NULL;
    IplImage *thresholdedImg = NULL;    // binarized
    IplImage *histogramEqualizatedImg = NULL; //histogram equalization

    IplImage *yuvImg = NULL;
    IplImage *maskImage = NULL;
    //these channels sometimes stand for the R,G,B or Y,Cr,Cb channels
    IplImage *channel_0 = NULL;
    IplImage *channel_1 = NULL;
    IplImage *channel_2 = NULL;

    CvRect faceRect;

    for(int i =0; i < argc; i++){
        LOGD("argv[%d] = %s", i, argv[i]);
    }
    switch (argc) {
        case 2:
            mCascadeFile = argv[1];
        case 3:
            ;
        default:
            break;
    }

    mCvCapture = cvCreateCameraCapture(CV_CAP_ANY);

    mFrame = cvCreateImage(getCaptureSize(mCvCapture), IPL_DEPTH_8U, 3);  
    DUMP_IMAGE(mFrame);
    yuvImg = cvCreateImage(cvGetSize(mFrame), mFrame->depth, mFrame->nChannels);
    DUMP_IMAGE(yuvImg);
    faceNotFoundImg = cvLoadImage("./face_not_found.bmp");
    DUMP_IMAGE(faceNotFoundImg);
    maskImage = cvCreateImage(cvGetSize(mFrame), IPL_DEPTH_8U, 1);
    DUMP_IMAGE(maskImage);
    channel_0 = cvCreateImage(cvGetSize(mFrame), IPL_DEPTH_8U, 1);
    DUMP_IMAGE(channel_0);
    channel_1 = cvCreateImage(cvGetSize(mFrame), IPL_DEPTH_8U, 1);
    channel_2 = cvCreateImage(cvGetSize(mFrame), IPL_DEPTH_8U, 1);

    // Create a window in which the captured images will be presented
    //cvNamedWindow( "window_0", CV_WINDOW_AUTOSIZE );
    cvNamedWindow("window_1", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("window_2", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("window_1", 600, 100);

DURATION_START;
    mFaceCascade = (CvHaarClassifierCascade*)cvLoad(mCascadeFile, 0, 0, 0 );
DURATION_STOP("load cascade file");
    
    int frame_count = 0;
    bool detect_res = false;
    float scale_ratio = 0;
    

    duration_start();
    for(; ; frame_count ++) {
        mOrigFrame= cvQueryFrame(mCvCapture);
        if (mOrigFrame) {
            LOGD("capture %d frame", frame_count);
            cvCopy(mOrigFrame, mFrame);
        }

        if(mFrame->origin == IPL_ORIGIN_BL) {
            LOGD("need FLIP");
            cvFlip(mFrame, mFrame, 0);
        }

        //cvShowImage("window_0", mFrame);

        cvZero(maskImage);
        cvCvtColor(mFrame, yuvImg, CV_BGR2YCrCb);

        cvSetImageCOI(yuvImg,2);//channel Cr
        cvCopy(yuvImg, channel_1);
        cvSetImageCOI(yuvImg, 3);//channel Cb
        cvCopy(yuvImg, channel_2);
        cvSetImageCOI(yuvImg,0);//reset

        for(int i = 0; i < mFrame->height; i++) {
            for(int j = 0; j < mFrame->width; j ++) {
                if( ((unsigned char *)(channel_1->imageData + i * channel_1->widthStep))[j] >= 123
                 && ((unsigned char *)(channel_1->imageData + i * channel_1->widthStep))[j] <= 175 
                 && ((unsigned char *)(channel_2->imageData + i * channel_2->widthStep))[j] >= 93
                 && ((unsigned char *)(channel_2->imageData + i * channel_2->widthStep))[j] <= 133) {

                    ((unsigned char *)(maskImage->imageData + i * maskImage->widthStep))[j] = 255;
                 }
            }
        }

        cvShowImage("window_1", mFrame);
        cvCopy(mFrame, yuvImg, maskImage);
        //cvShowImage("window_2", yuvImg);
        cvShowImage("window_2", maskImage);



        //resizedImg = resizeImage(mFrame);
        //scale_ratio = ((float) (mFrame->width)) / resizedImg->width;
        //LOGD("scale_ratio = %f", scale_ratio);
        
/*
//DURATION_START;
        detect_res = face_detect(resizedImg, mFaceCascade, &faceRect);
//DURATION_STOP("face_detect");

        if(detect_res){
            //LOGD("detected zone:(%d,%d) with %dx%d", faceRect.x, faceRect.y, faceRect.width, faceRect.height);
            detectedImg = cvCreateImage(cvSize(faceRect.width, faceRect.height), IPL_DEPTH_8U, 1);
            cvSetImageROI(resizedImg, faceRect);
            cvResize(resizedImg, detectedImg);
            cvResetImageROI(resizedImg);

            thresholdedImg = cvCreateImage(cvGetSize(detectedImg), detectedImg->depth, detectedImg->nChannels);
            histogramEqualizatedImg = cvCreateImage(cvGetSize(detectedImg), detectedImg->depth, detectedImg->nChannels);
            cvThreshold(detectedImg, thresholdedImg, 80, 255, CV_THRESH_BINARY);    // binarization
            cvEqualizeHist(detectedImg, histogramEqualizatedImg);

            cvShowImage("window_1", histogramEqualizatedImg);
            
            cvReleaseImage(&resizedImg);
            cvReleaseImage(&detectedImg);
            cvReleaseImage(&thresholdedImg);
            cvReleaseImage(&histogramEqualizatedImg);
        }else{
            //LOGD("face NOT founded");
            cvShowImage("window_1", faceNotFoundImg);
        }
*/

        //press 'esc' to stop
        if ( (cvWaitKey(5) & 255) == 27 ) break;

        if(frame_count % 5 == 0) {
            LOGD("frame rate:%f", ((float) 5) / duration_stop() * 1000/*ms*/);
            duration_start();
        }
    }
    
    //cvDestroyWindow("window_0");
    cvDestroyWindow("window_1");
    cvReleaseCapture(&mCvCapture);
    return 0;
}
Code example #20
File: camera.cpp Project: JackFan-Z/MetroVision
/** Returns a CvSeq (an OpenCV sequence) of Tetris pieces detected in an image.
   Based on the OpenCV example of identifying a square.  Modified to detect
   L-shaped Tetris pieces.  Effectiveness depends on the edge-detection
   thresholds and on the camera being positioned orthogonal to the Tetris piece.
 */
CvSeq* Camera::findTetris( IplImage* img, CvMemStorage* storage )
{
	thresh = 50;
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );

	/// Copy of image so that the detection is non-destructive
    IplImage* timg = cvCloneImage( img );

	/// Gray scale needed
	IplImage* gray = cvCreateImage( sz, 8, 1 );

	/// Smaller version to do scaling
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    /// create an empty sequence that will contain the points:
    /// 6 points per Tetris piece (the vertices)
    CvSeq* tetrisPieces = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum region of interest (ROI) in the image,
    // with width and height rounded down to be divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale then up-scale the image to filter out noise:
    // cvPyrDown blurs and halves the image, discarding fine detail,
    // and cvPyrUp restores the size, so the pair acts as a low-pass filter
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    /// find pieces in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        /// extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        /// try several threshold levels
        for( l = 0; l < N; l++ )
        {
            /// hack: use Canny instead of zero threshold level.
            /// Canny helps to catch tetrisPieces with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 50, 120, 5 );
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                //     gray(x,y) = tgray(x,y) > (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );

                /* Tetris pieces have 6 vertices.  The area bounds are used
                 * to filter out "noisy" contours.  Note: the absolute value
                 * of the area is used because the signed area may be positive
                 * or negative, in accordance with the contour orientation. */
                if( result->total == 6 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 )
                {
                    s = 0;

                    for( i = 0; i < 7; i++ )
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                            (CvPoint*)cvGetSeqElem( result, i ),
                            (CvPoint*)cvGetSeqElem( result, i-2 ),
                            (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees), write the piece's
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 6; i++ )
                            cvSeqPush( tetrisPieces,
                                (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return tetrisPieces;
}
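
The cosine test above relies on an angle() helper that is not shown in this excerpt. Since the function states it is based on the OpenCV square-detection example, a sketch along the lines of that sample's helper (returns the cosine of the angle at pt0 between the edges toward pt1 and pt2):

#include <math.h>

static double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    // the 1e-10 term guards against division by zero on degenerate edges
    return (dx1*dx2 + dy1*dy2) /
           sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

Values near 0 mean the two edges meet at roughly 90 degrees, which is why the caller keeps contours whose maximum cosine s stays below a small threshold.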
Code example #21
int RectDetector::FindShape(const IplImage* img,std::vector<RectShape>& shapes)
{
	if( !img )
	{
        return -1;
	}

	int thresh = 50;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours;
	int i, l, N = 11;
	CvSize sz = cvSize( img->width & -2, img->height & -2 ); 
 
	IplImage* timg = cvCloneImage( img );
	IplImage* gray = cvCreateImage( sz, 8, 1 );
	IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
	IplImage* tgray;
	CvSeq* result;
	double s, t;
	// create an empty sequence to store the contour corner points
	//CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

	cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
	// filter out noise (the pyramid down/up smoothing is disabled here)
	//cvPyrDown( timg, pyr, 5 );
	//cvPyrUp( pyr, timg, 5 );
	tgray = cvCreateImage( sz, 8, 1 );
	//cvShowImage("test",timg);
 

	// extract a single color plane; the COI must be non-zero here,
	// otherwise cvCopy cannot copy the multi-channel timg into the
	// single-channel tgray
	cvSetImageCOI( timg, 1 );
	cvCopy( timg, tgray, 0 );

	// try several threshold levels (N = 11)
	for( l = 0; l < N; l++ )
	{
		// apply Canny. Take the upper threshold from slider
		// Canny helps to catch squares with gradient shading  
		if( l == 0 )
		{
			cvCanny( tgray, gray, 0, thresh, 5 );
			// dilate the image (default 3x3 element) to close gaps between edge segments
			cvDilate( gray, gray, 0, 2 );
			//cvShowImage("test",gray);
		}
		else
		{
			// apply threshold if l!=0:
			cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
 
		}

		// find all contours and store them as a list
		cvFindContours( gray, storage, &contours, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

		// iterate over each contour found
		while( contours )
		{
			// approximate the polygonal curve with the specified accuracy
			result = cvApproxPoly( contours, sizeof(CvContour), storage,
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                  

			if( result->total == 4 &&
				fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 500 &&
				fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 100000 &&
				cvCheckContourConvexity(result) )
			{
				s = 0;

				for( i = 0; i < 5; i++ )
				{
					// find minimum angle between joint edges (maximum of cosine)
					if( i >= 2 )
					{
						t = fabs(this->CalcAngle((CvPoint*)cvGetSeqElem( result, i ),
										(CvPoint*)cvGetSeqElem( result, i-2 ),
										(CvPoint*)cvGetSeqElem( result, i-1 )));
						s = s > t ? s : t;
					}
				}

				// if the cosine is small enough, the corner can be taken
				// as a right angle: acos(0.1) ~ 84 degrees, close to 90
				if( s < 0.1 )
				{
					RectShape rect( *(CvPoint*)cvGetSeqElem( result, 0 ),
					                *(CvPoint*)cvGetSeqElem( result, 1 ),
					                *(CvPoint*)cvGetSeqElem( result, 2 ),
					                *(CvPoint*)cvGetSeqElem( result, 3 ) );

					bool isExist = false;
					// de-duplicate against the rectangles collected so far
					for(size_t j = 0; j < shapes.size(); ++j)
					{
						if( rect == shapes[j] )
						{
							isExist = true;
							continue;
						}
						// remove the rectangle which contains the other,
						// keeping the one nearer the top-left corner
						if( rect.isNear(shapes[j]) )
						{
							if( rect.ctPoint.x > shapes[j].ctPoint.x || rect.ctPoint.y > shapes[j].ctPoint.y )
							{
								isExist = true;
								continue;
							}
							else
							{
								shapes[j] = rect;
							}
						}
					}

					if(!isExist)
					{
						shapes.push_back(rect);
					}
					
				}
			}

			// 继续查找下一个轮廓
			contours = contours->h_next;
		

		}
	}

	cvReleaseImage( &gray );
	cvReleaseImage( &pyr );
	cvReleaseImage( &tgray );
	cvReleaseImage( &timg );
	cvClearMemStorage( storage );
	cvReleaseMemStorage( &storage );  // storage was created locally; release it to avoid a leak
	


	return shapes.size();
}
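
A hedged usage sketch for the detector (the default construction of RectDetector and the image path are assumptions; only FindShape's signature and RectShape::ctPoint are taken from the code above):

#include <cstdio>
#include <vector>

int countRects(const char* path)
{
    IplImage* frame = cvLoadImage(path, CV_LOAD_IMAGE_COLOR);
    if (!frame)
        return -1;

    RectDetector detector;
    std::vector<RectShape> shapes;
    int n = detector.FindShape(frame, shapes);

    // report the center of every rectangle that survived de-duplication
    for (size_t i = 0; i < shapes.size(); ++i)
        printf("rect %d center: (%d, %d)\n",
               (int)i, shapes[i].ctPoint.x, shapes[i].ctPoint.y);

    cvReleaseImage(&frame);
    return n;
}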
Code example #22
File: Utils.cpp  Project: corvofeng/cube
CvSeq* findSquares4(IplImage *img, CvMemStorage* storage)
{
	CvSeq* contours;
	int N = 11;
	int thresh = 50;
	CvSize sz = cvSize(img->width & -2, img->height & -2);

	IplImage* timg = cvCloneImage(img);
	IplImage* gray = cvCreateImage(sz, 8, 1);
	IplImage* pyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
	IplImage* tgray;

	CvSeq* result;

	// create an empty sequence to store the square corner points
	CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

	cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));

	// filter out noise (the pyramid down/up smoothing is disabled here)
	//cvPyrDown(timg, pyr, 7);
	tgray = cvCreateImage(sz, 8, 1);
	//cvPyrUp(pyr, timg, 7);

        // extract each of the three color planes in turn
	for (int c = 0; c < 3; c++) {
		cvSetImageCOI(timg, c + 1);
		cvCopy(timg, tgray, 0);

                // try several threshold levels
		for (int l = 0; l < N; l++) {
			if (l == 0) {
				cvCanny(tgray, gray, 0, thresh, 5);
				cvDilate(gray, gray, 0, 1);
			} else {
				cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
				                CV_THRESH_BINARY);
			}

                        // find contours and store them as a list
			cvFindContours(gray, storage, &contours,
			                sizeof(CvContour), CV_RETR_LIST,
			                CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));


			// examine every contour found
			while (contours) {
				// approximate the polygonal curve with accuracy
				// proportional to the contour perimeter
				result = cvApproxPoly(contours, sizeof(CvContour), storage,
				                CV_POLY_APPROX_DP,
				                cvContourPerimeter(contours) * 0.02, 0);

				// keep convex quadrilaterals of plausible area; the absolute
				// value is taken because the signed area depends on the
				// contour orientation
				if (result->total == 4
				                && fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 500
				                && fabs(cvContourArea(result, CV_WHOLE_SEQ)) < 100000
				                && cvCheckContourConvexity(result)) {

					double s = 0, t;

					// find the maximum cosine of the angles between joint
					// edges (cvGetSeqElem wraps the index, so i == 4 reads
					// vertex 0 again)
					for (int i = 0; i < 5; i++) {
						if (i >= 2) {
							t = fabs(angle(
							                (CvPoint*) cvGetSeqElem(result, i),
							                (CvPoint*) cvGetSeqElem(result, i - 2),
							                (CvPoint*) cvGetSeqElem(result, i - 1)));
							s = s > t ? s : t;
						}
					}

					// if the largest cosine is small enough, every corner is
					// close to 90 degrees, so accept the contour as a square
					if (s < 0.08) {
						for (int i = 0; i < 4; i++) {
							cvSeqPush(squares,
							                (CvPoint *) cvGetSeqElem(result, i));
						}
					}
				}
				contours = contours->h_next;
			}

		}
	}

	cvReleaseImage(&gray);
	cvReleaseImage(&pyr);
	cvReleaseImage(&tgray);
	cvReleaseImage(&timg);

	return squares;
}
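
The returned sequence packs four CvPoints per square, so a consumer reads them out four at a time; a sketch in the style of the original squares.c demo (the function name is illustrative):

void drawSquares( IplImage* img, CvSeq* squares )
{
    CvSeqReader reader;
    int i;

    cvStartReadSeq( squares, &reader, 0 );
    for( i = 0; i < squares->total; i += 4 )
    {
        CvPoint pt[4];
        CvPoint* rect = pt;
        int count = 4;

        // read the 4 vertices of one square
        CV_READ_SEQ_ELEM( pt[0], reader );
        CV_READ_SEQ_ELEM( pt[1], reader );
        CV_READ_SEQ_ELEM( pt[2], reader );
        CV_READ_SEQ_ELEM( pt[3], reader );

        // outline the square in green
        cvPolyLine( img, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, 8, 0 );
    }
}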
Code example #23
File: iplimage.cpp  Project: thenoseman/ruby-opencv
/*
 * Reset COI setting. Same as IplImage#coi = 0. Return self.
 */
VALUE
rb_reset_coi(VALUE self)
{
  cvSetImageCOI(IPLIMAGE(self), 0);
  return self;
}
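
For reference, a minimal C illustration of the round trip this binding wraps (the image here is a stand-in):

IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);

cvSetImageCOI(img, 2);                     // later ops touch only channel 2
printf("coi = %d\n", cvGetImageCOI(img));  // prints: coi = 2

cvSetImageCOI(img, 0);                     // reset: no channel of interest
printf("coi = %d\n", cvGetImageCOI(img));  // prints: coi = 0

cvReleaseImage(&img);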
Code example #24
File: testApp.cpp  Project: nagyistoce/gasubasu
//--------------------------------------------------------------
void testApp::update(){
	

    bool bNewFrame = false;

	#ifdef _USE_LIVE_VIDEO
		vidGrabber.grabFrame();
		bNewFrame = vidGrabber.isFrameNew();
	#else
		vidPlayer.idleMovie();
		bNewFrame = vidPlayer.isFrameNew();
	#endif

	if (bNewFrame){

		#ifdef _USE_LIVE_VIDEO
            colorImg.setFromPixels(vidGrabber.getPixels(), cw,ch);
	    #else
            colorImg.setFromPixels(vidPlayer.getPixels(), cw,ch);
        #endif

		
		kx = (float) ofGetWidth()  / cw;
		ky = (float) ofGetHeight() / ch;
		
		cvSmooth(colorImg.getCvImage(), medianImg.getCvImage(), CV_MEDIAN, medianValue, medianValue);
		medianImg.flagImageChanged();
				
		grayImage = medianImg;
		
		cvCvtColor(colorImg.getCvImage(), hsvImage.getCvImage(), CV_RGB2HSV);
		hsvImage.flagImageChanged();
		
		cvSetImageCOI(hsvImage.getCvImage(), 2);
		cvCopy(hsvImage.getCvImage(), satImage.getCvImage());
		satImage.flagImageChanged();
		cvSetImageCOI(hsvImage.getCvImage(), 0);
		
		//cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_BLUR, 3, 3, 0, 0);
		
		cvAdaptiveThreshold(grayImage.getCvImage(), trsImage.getCvImage(), 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, adaptiveThreshValue);
		//cvCanny(trsImage.getCvImage(), trsImage.getCvImage(), sb, sb*4, 3);
		trsImage.flagImageChanged();
		
		
//		cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_MEDIAN, 7, 7);
		
//		cvSmooth( iplImage, iplImage, CV_BLUR, br, br, 0, 0 );
//		cvSmooth( iplImage, iplImage, CV_MEDIAN, 7, 7);
		cvCanny(  grayImage.getCvImage(), cannyImage.getCvImage(), cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
		cannyImage.flagImageChanged();
			
		//cvPyrMeanShiftFiltering(colorImg.getCvImage(), colorImg.getCvImage(), 20, 40, 2);
		
		if (mode==MODE_DRAWING) {

			if (draw_edges) {

				#if PROBABILISTIC_LINE
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, lineThreshValue, lineMinLengthValue, lineMaxGapValue );
				#else
					lines = cvHoughLines2( cannyImage.getCvImage(), linesStorage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );
				#endif
			
			}
			
			if (draw_contours || draw_approx) {
				cvFindContours(cannyImage.getCvImage(), edgesStorage, &edgeContours);
				
				CvSeq* contour = edgeContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
				
			}

			if (draw_fills) {
				cvFindContours(trsImage.getCvImage(), fillsStorage, &fillContours);
				
				CvSeq* contour = fillContours;
				while (contour!=NULL) {
					for (int j = 0; j < contour->total; j++){
						CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
						p1->x = p1->x*(float)kx;
						p1->y = p1->y*(float)ky;
					}
					contour = contour->h_next;
				}
			}
		}

	}
	
	
	// update scope
//	float* rand = new float[50];
//	for(int i=0 ;i<50; i++){
//		rand[i] = ofRandom(-1.0,1);
//		
//	}
//	
//	gui->update(scope, kofxGui_Set_FloatArray, rand, sizeof(float*));
//	
//	// make 3 seconds loop
//	float f = ((ofGetElapsedTimeMillis()%3000) / 3000.0);
//	gui->update(points, kofxGui_Set_Float, &f, sizeof(float));

}
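
The two contour-scaling loops in update() are identical; a small helper could fold them together. A sketch under the same assumptions (kx/ky as computed above; scaleContours is a hypothetical name):

static void scaleContours(CvSeq* contours, float kx, float ky)
{
    // walk the chain of contours and scale every vertex from
    // camera coordinates to screen coordinates
    for (CvSeq* c = contours; c != NULL; c = c->h_next) {
        for (int j = 0; j < c->total; j++) {
            CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, c, j);
            p->x = (int)(p->x * kx);
            p->y = (int)(p->y * ky);
        }
    }
}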
Code example #25
int main(int argc, char* argv[]) {
    int counter;
    wchar_t auxstr[20];

    printf("start!\n");

    printf("PUCK MINH: %d\n",minH);
    printf("PUCK MAXH: %d\n",maxH);
    printf("PUCK MINS: %d\n",minS);
    printf("PUCK MAXS: %d\n",maxS);
    printf("PUCK MINV: %d\n",minV);
    printf("PUCK MAXV: %d\n",maxV);
    printf("ROBOT MINH: %d\n",RminH);
    printf("ROBOT MAXH: %d\n",RmaxH);
    printf("ROBOT MINS: %d\n",RminS);
    printf("ROBOT MAXS: %d\n",RmaxS);
    printf("ROBOT MINV: %d\n",RminV);
    printf("ROBOT MAXV: %d\n",RmaxV);
    printf("FPS: %d\n",fps);

	//pwm initialize
	if(gpioInitialise() < 0) return 1;

    // input image file names
    char imgfile[] = "camera/photodir/capmallet1.png";
    char imgfile2[] = "camera/photodir/capmallet2.png";

    // load the images
    img = cvLoadImage(imgfile, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    img2 = cvLoadImage(imgfile2, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);

    // create windows for displaying the images
    cvNamedWindow("circle_sample", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("circle_sample2", CV_WINDOW_AUTOSIZE);

    //cvNamedWindow("cv_ColorExtraction");

    // Init font
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);

    IplImage* dst_img_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    // white extraction: 0, 255, 0, 15, 240, 255
    // black extraction: 0, 255, 0, 50, 0, 100
    // blue detection:   0, 255, 50, 200, 100, 180
    //cv_ColorExtraction(img, dst_img_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img, dst_img_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    cv_ColorExtraction(img2, dst_img2_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img2, dst_img2_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);

    //CvMoments moment_mallett;
    CvMoments moment_pack;
    CvMoments moment2_mallett;
    CvMoments moment2_pack;

    //cvSetImageCOI(dst_img_mallett, 1);
    cvSetImageCOI(dst_img_pack, 1);
    cvSetImageCOI(dst_img2_mallett, 1);
    cvSetImageCOI(dst_img2_pack, 1);

    //cvMoments(dst_img_mallett, &moment_mallett, 0);
    cvMoments(dst_img_pack, &moment_pack, 0);
    cvMoments(dst_img2_mallett, &moment2_mallett, 0);
    cvMoments(dst_img2_pack, &moment2_pack, 0);

    // centroid calculation from the image moments
    double m00_before = cvGetSpatialMoment(&moment_pack, 0, 0);
    double m10_before = cvGetSpatialMoment(&moment_pack, 1, 0);
    double m01_before = cvGetSpatialMoment(&moment_pack, 0, 1);
    double m00_after = cvGetSpatialMoment(&moment2_pack, 0, 0);
    double m10_after = cvGetSpatialMoment(&moment2_pack, 1, 0);
    double m01_after = cvGetSpatialMoment(&moment2_pack, 0, 1);
    double gX_before = m10_before/m00_before;
    double gY_before = m01_before/m00_before;
    double gX_after = m10_after/m00_after;
    double gY_after = m01_after/m00_after;
    double m00_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 0);
    double m10_mallett = cvGetSpatialMoment(&moment2_mallett, 1, 0);
    double m01_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 1);
    double gX_now_mallett = m10_mallett/m00_mallett;
    double gY_now_mallett = m01_mallett/m00_mallett;

    cvCircle(img2, cvPoint((int)gX_before, (int)gY_before), 50, CV_RGB(0,0,255), 6, 8, 0);

    cvLine(img2, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);

    int target_destanceY = 480 - 30; // fixed Y coordinate: the defense line
    // the puck travels in a straight line, so a linear function is used to predict its subsequent trajectory
    double a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
    double b_intercept = gY_after - a_inclination * gX_after;
    printf("gX_after: %f\n",gX_after);
    printf("gY_after: %f\n",gY_after);
    printf("gX_before: %f\n",gX_before);
    printf("gY_before: %f\n",gY_before);
    printf("a_inclination: %f\n",a_inclination);
    printf("b_intercept: %f\n",b_intercept);

    int target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
    printf("target_coordinateX: %d\n",target_coordinateX);
    cvLine(img2, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
    cvLine(img2, cvPoint(640, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
    cvLine(img2, cvPoint((int)gX_now_mallett, (int)gY_now_mallett), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
    cvPutText (img2, to_c_char((int)gX_now_mallett), cvPoint(460,30), &font, cvScalar(220,50,50));
    cvPutText (img2, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));
    int amount_movement = gX_now_mallett - target_coordinateX;
    // how far the mallet can move during one two-image comparison cycle
    int max_amount_movement = CAM_PIX_WIDTH * 0.54 / 1; // CAM_PIX_WIDTH: 640, comparison time: 0.27 s x 2, time to traverse the table: 1 s
	int target_direction = 0; // initialized so it is defined even when amount_movement == 0
	if(amount_movement > 0){
	    if(max_amount_movement < amount_movement){
    	    amount_movement = max_amount_movement;
    	}
		target_direction = 0; // clockwise
	}
	else if(amount_movement < 0){
		amount_movement = -amount_movement; // make it positive
		if(max_amount_movement < amount_movement){
			amount_movement = max_amount_movement;
		}
		target_direction = 1; // counterclockwise
	}    

	//pwm output
	double set_time_millis = 270 * amount_movement / max_amount_movement; // up to 270 ms (0.27 s x movement ratio)
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(19, PI_OUTPUT);
	gpioPWM(18, 128);
	gpioWrite(19, target_direction);
	int closest_frequency = gpioSetPWMfrequency(18, 2000);
	printf("setting_frequency: %d\n", closest_frequency);
	gpioSetTimerFunc(0, (int)set_time_millis, pwmReset);

    // display the images in the windows created above
    cvShowImage("circle_sample", img);
    cvShowImage("circle_sample2", img2);

    while(1) {
        if(cv::waitKey(30) >= 0) {
            break;
        }
    }
	gpioTerminate();
    //Clean up used images
    cvReleaseImage(&img);
//    cvReleaseImage (&dst_img);

    cvDestroyAllWindows() ;

    return 0;
}
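
The trajectory prediction in main() can be factored into a small helper; a sketch under the same assumptions (two puck centroids and a fixed defense line; like the original, it assumes the puck is not moving parallel to the line, i.e. the slope is non-zero):

// Extrapolate the straight line through (x0, y0) -> (x1, y1) and return
// the X coordinate where it crosses y = defense_line_y.
static double predict_intercept_x(double x0, double y0,
                                  double x1, double y1,
                                  double defense_line_y)
{
    double slope = (y1 - y0) / (x1 - x0);         // a_inclination above
    double intercept = y1 - slope * x1;           // b_intercept above
    return (defense_line_y - intercept) / slope;  // solve y = slope*x + b for x
}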
Code example #26
File: core_c.cpp  Project: neutmute/emgucv
void cveSetImageCOI(IplImage* image, int coi)
{
   cvSetImageCOI(image, coi);
}