Example #1
int main( int argc, char** argv ) {

	// create a window of the appropriate size; the window's name is the
	// file name supplied as an argument
	cvNamedWindow( argv[1], CV_WINDOW_AUTOSIZE );
	// open video
	g_capture = cvCreateFileCapture( argv[1] );
	// retrieve the total number of frames (read position is measured in frames)
	int frames = (int) cvGetCaptureProperty(
		g_capture,
		CV_CAP_PROP_FRAME_COUNT
	);


	// do not create a trackbar if the video does not include
	// information about the number of frames
	if( frames != 0 ) {
		cvCreateTrackbar(
			"Position",
			argv[1],
			&g_slider_position,
			frames,
			onTrackbarSlide
		);
	}

	// display video frame by frame
	IplImage* frame;
	while(1) {

		frame = cvQueryFrame( g_capture );
		if( !frame ) break;
		cvShowImage( argv[1], frame );
		// set the trackbar to the current frame position
		cvSetTrackbarPos("Position", argv[1], g_slider_position);

		g_slider_position++;

		char c = cvWaitKey(33);
		// quit if ESC is pressed
		if( c == 27 ) break;

	}
	// free memory
	cvReleaseCapture( &g_capture );
	cvDestroyWindow( argv[1] );
	return(0);
}
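Example 1 relies on a global capture handle, a global slider position, and an onTrackbarSlide callback that the listing does not show. A minimal sketch of those assumed pieces (the callback body is a guess based on how the trackbar is used):

#include <opencv/highgui.h>

CvCapture* g_capture = NULL;
int g_slider_position = 0;

// Hypothetical callback: seek the video to the frame chosen on the trackbar
void onTrackbarSlide( int pos ) {
	cvSetCaptureProperty( g_capture, CV_CAP_PROP_POS_FRAMES, pos );
}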
Example #2
void Update_Gui_Windows() 
{
  static int first = 1;

  cvShowImage(eye_window, eye_image);
  cvShowImage(original_eye_window, original_eye_image);
  cvReleaseImage(&original_eye_image);
  cvShowImage(scene_window, scene_image);
  cvShowImage(ellipse_window, ellipse_image);
  cvResizeWindow(eye_window,RESOLUTION_WIDTH,RESOLUTION_HEIGHT);
  cvResizeWindow(original_eye_window,RESOLUTION_WIDTH,RESOLUTION_HEIGHT);
  cvResizeWindow(scene_window,RESOLUTION_WIDTH,RESOLUTION_HEIGHT);
  cvResizeWindow(ellipse_window,RESOLUTION_WIDTH,RESOLUTION_HEIGHT);
  // cvMoveWindow() is only available from OpenCV 0.9.6 onward; we are currently using version 0.9.5
  if (first) {
    cvMoveWindow(eye_window, RESOLUTION_WIDTH, 0);
    cvMoveWindow(scene_window, 2*RESOLUTION_WIDTH, 0);
    cvMoveWindow(ellipse_window, RESOLUTION_WIDTH, RESOLUTION_HEIGHT);
    first = 0;
  }

  cvSetTrackbarPos("Edge Threshold", control_window, pupil_edge_thres);
}
Example #3
/**********************************************************
 * Callback needed by OpenCV for controlling values set by
 * the track bars.
 *********************************************************/
void Recorder::onLowSlider(int val, void* userData) {
   if(!Recorder::mParams->setLowLevel(val)) {
      // value rejected: snap the trackbar back to the high level, which also
      // updates Recorder::mLowLevel (presumably the trackbar's bound variable)
      cvSetTrackbarPos("Low", "EVM", Recorder::mParams->getHighLevel());
   }
   Recorder::mParams->setLowLevel(Recorder::mLowLevel);
}
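The reset above only makes sense if setLowLevel() rejects values that would cross the high threshold. A hypothetical sketch of such a setter (the real Recorder::mParams class is not shown in the listing):

// Hypothetical setter: accept the new low level only while it stays below
// the high level; returning false lets the caller snap the trackbar back.
bool Params::setLowLevel(int val) {
   if (val >= mHighLevel) {
      return false;
   }
   mLowLevel = val;
   return true;
}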
Example #4
/**********************************************************
 * Callback needed by OpenCV for controlling values set by
 * the track bars.
 *********************************************************/
void Recorder::onFilterSlider(int val, void* userData) {
   if(!Recorder::mParams->setFilter(val)) {
      cvSetTrackbarPos("Filter", "EVM", 0); //default to gaussian
      Recorder::mParams->setFilter(0);
   }
}
Example #5
void cv::setTrackbarPos( const std::string& trackbarName, const std::string& winName, int value )
{
    cvSetTrackbarPos(trackbarName.c_str(), winName.c_str(), value );
}
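This wrapper simply forwards to the legacy C function. A minimal usage sketch pairing it with cv::createTrackbar (window and trackbar names are illustrative):

#include <opencv2/highgui/highgui.hpp>

int main()
{
    int threshold_value = 0;
    cv::namedWindow("demo");
    cv::createTrackbar("Threshold", "demo", &threshold_value, 255);
    cv::setTrackbarPos("Threshold", "demo", 128); // moves the slider and updates threshold_value
    cv::waitKey(0);
    return 0;
}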
Example #6
int main( int argc, char** argv ) {
  int ikey=0;
  char key;
  IplImage *imCapt,*imDisp,*imDisp2;
  static int tb1_val,tb2_val;  /* Trackbar parameters */
  timewin *tw;
  buffer *bf_img;
  bbuffer *bf_blobs;
  int width,height;

  if(kb_lowresfl) {
    width=320;
    height=240;
  } else {
    width=640;
    height=480;
  }

  /* Set window title to a list of control keys */
  const char *window1 = "Controls: (n)eighbours, (d)istance, (b)lur type, (h)igh-res, (c)olourspace, (r)eset, (ESC) exit";
  const char *trackbar1 = "margin";
  const char *trackbar2 = "timesteps";
  
  int fno;
 
  cvInitSystem( argc,argv );
  
  /* Get an OpenCV camera handle */
  cam = cvCreateCameraCapture(0);

  /* Set size of image (appears to be ignored in Linux) */
  cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_WIDTH, width);
  cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_HEIGHT, height);

  /* Create a window with slider */
  cvNamedWindow(window1, CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback(window1,callback_mouse, NULL );
  tb_margin = DEF_MARGIN; /* Default */
  tb1_val=tb_margin/TB1_SCALE*TB1_MAX;
  cvCreateTrackbar(trackbar1,window1,&tb1_val,TB1_MAX,callback_trackbar1);
  tb2_val = tb_timesteps = DEF_TIMESTEPS; /* Default */
  cvCreateTrackbar(trackbar2,window1,&tb2_val,TB2_MAX,callback_trackbar2);
  cvMoveWindow(window1, 100, 45);

  /* Allocate image buffers */
  if(kb_lowresfl)
    imDisp2 =  cvCreateImage( cvSize(width*4,height*2), IPL_DEPTH_8U, 3);
  else
    imDisp2 =  cvCreateImage( cvSize(width*2,height), IPL_DEPTH_8U, 3);
  imDisp =  cvCreateImage( cvSize(width*2,height), IPL_DEPTH_8U, 3);
  bf_img = buffer_new(height,width,3);
  bf_blobs = bbuffer_new(height,width,3);

  tw=timewin_new(DEF_TIMESIZE);
  fno=0;

  key=(char)cvWaitKey(500);
  
  while ( key !=27 ) {
    imCapt = cvQueryFrame(cam);
    buffer_iplcopy(bf_img,imCapt,1);
    /* Detect blobs */
    detect_and_render_blobs(bf_img,bf_blobs);

    /* Display result */
    flip_image(imCapt,imDisp);
    paste_image(imDisp,bf_blobs,width);
    
    if(kb_lowresfl) {
      upsample_image(imDisp,imDisp2);
      cvShowImage(window1, imDisp2);
    } else {
      cvShowImage(window1, imDisp);
    }

    ikey=cvWaitKey(5); /* Needed for highgui event processing */
    if(ikey>0) {
      key=(char)ikey;

      if(key == 'n') {
	kb_n8flag=1-kb_n8flag;
	printf("n8flag=%d\n",kb_n8flag);
      }
      
      if(key == 'd') {
	kb_normfl=1-kb_normfl;
	printf("normfl=%d\n",kb_normfl);
      }
      
      if(key == 'b') {
	kb_blurfl=1-kb_blurfl;
	printf("blurfl=%d\n",kb_blurfl);
      }
      if(key =='c') {
	kb_cspace=1-kb_cspace; /* Toggle colourspace */
	printf("cspace=%d\n",kb_cspace);
      }
      
      if(key == 'r') {
	tb_margin=DEF_MARGIN; /* Reset to default */
	tb1_val=tb_margin/TB1_SCALE*TB1_MAX;
	cvSetTrackbarPos(trackbar1,window1,tb1_val);
	tb2_val=tb_timesteps=DEF_TIMESTEPS;  /* Reset to default */
	cvSetTrackbarPos(trackbar2,window1,tb2_val);
	kb_n8flag=DEF_N8FLAG;
	kb_normfl=DEF_NORMFL;
	kb_blurfl=DEF_BLURFL;
	printf("timesteps =%d\n",tb_timesteps);
	printf("min_margin=%g\n",tb_margin);
	printf("n8flag=%d\n",kb_n8flag);
	printf("normfl=%d\n",kb_normfl);
	printf("blurfl=%d\n",kb_blurfl);
	cvWaitKey(1); 
	/* printf("tb1:%d\n",cvGetTrackbarPos(trackbar1,window1)); */
	/* printf("tb2:%d\n",cvGetTrackbarPos(trackbar2,window1)); */
      }
      if(key == 'h') {
	kb_lowresfl=1-kb_lowresfl; /* Toggle resolution */
	if(kb_lowresfl) {
	  width=320;
	  height=240;
	} else {
	  width=640;
	  height=480;
	}
	cvReleaseCapture(&cam);
	cam = cvCreateCameraCapture(0);
	/* Set size of image */
	cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_WIDTH, width);
	cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_HEIGHT, height);
	/* Free image buffers */
	cvReleaseImage( &imDisp );
	buffer_free(bf_img);
	bbuffer_free(bf_blobs);
	/* Allocate image buffers */
	imDisp =  cvCreateImage( cvSize(width*2,height), IPL_DEPTH_8U, 3);
	bf_img = buffer_new(height,width,3);
	bf_blobs = bbuffer_new(height,width,3);
      }
    }

    timewin_addtime(tw);
    fno++;
    if((fno%DEF_TIMESIZE)==0) printf("Frame rate %g Hz\n",timewin_rate(tw));
  }
  /* Clean up */
  timewin_free(tw);
  buffer_free(bf_img);
  bbuffer_free(bf_blobs);
  cvDestroyAllWindows();
  cvReleaseCapture(&cam);
  return 0;
}
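The two trackbar callbacks registered above are not part of the listing. Given that the program computes tb1_val = tb_margin/TB1_SCALE*TB1_MAX, the callbacks presumably apply the inverse mapping; a hedged sketch (globals and constants as assumed from the listing):

#include <stdio.h>

extern double tb_margin;  /* assumed global; printed with %g above */
extern int tb_timesteps;  /* assumed global */

/* Hypothetical callback: map the slider value back onto the margin range */
void callback_trackbar1(int val)
{
  tb_margin = (double)val*TB1_SCALE/TB1_MAX;
  printf("min_margin=%g\n",tb_margin);
}

/* Hypothetical callback: timesteps are used directly */
void callback_trackbar2(int val)
{
  tb_timesteps = val;
  printf("timesteps =%d\n",tb_timesteps);
}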
Example #7
void AdaptiveHistogramCamshift::AdaptHistogram(IplImage* hue, IplImage* mask, IplImage* out)
{
  // Prepare to analyze new track window
  const CvRect& updateHistRect = m_trackCompRect;

  // Now subdivide the track window and sum all pixels in each square
  // subdivision of size m_sBox using pixel values given by the
  // current histogram.  When we are done, we will normalize the sum
  // from each subdivision and then be able to tell roughly how
  // similar each subregion is to the track histogram.

  // Make sure box size is greater than or equal to 2, and protect from
  // changes made in GUI
  const int sBox = std::max(m_sBox, 2);
  if (sBox != m_sBox)
  {
    m_sBox = sBox;
    cvSetTrackbarPos(ControlNames[ControlName_SBox], m_controlsGUIWndName.c_str(), sBox);
  }
  std::vector<float> subdivs;
  int numRows, numCols;
  SubdivideSumTrackWnd(sBox, &numRows, &numCols, &subdivs);

  // If the track window is smaller than sBox there are no subdivisions; bail out.
  if (subdivs.empty())
  {
    return;
  }

  // Histogram for image subdivisions
  for (int i = 0; i < m_histDims; ++i)
  {
    float *bin = cvGetHistValue_1D(m_histTrackWnd, i);
    *bin = 0;
  }

  // Find the max value of the subdivisions
  float maxVal = *std::max_element(subdivs.begin(), subdivs.end());

  // DEBUG maxval
  //printf("maxVal: %f\n", maxVal);

  // Step through all subdivisions and weight them into a new histogram
  // if they are greater than minVal.  The weight function is r^2 where
  // r is the ratio of the subdivision value to the max subdivision
  // value.
  if (maxVal > 0)
  {
    float minVal = maxVal * 0.125f;
    float* subdivsCur = &subdivs[0];
    for (int i = 0; i < numRows; ++i)
    {
      for (int j = 0; j < numCols; ++j, ++subdivsCur)
      {
        // Create a box around this area
        CvPoint roiP1 = cvPoint(updateHistRect.x + sBox * j, updateHistRect.y + sBox * i);
        CvPoint roiP2 = cvPoint(roiP1.x + sBox, roiP1.y + sBox);

        if (*subdivsCur < minVal)
        {
          if(*subdivsCur > (minVal * 0.0625))
          {
            // Get ratio to max subdivision
            float ratioMaxSubdiv = *subdivsCur / maxVal;
            // Get color of surrounding box
            CvScalar boxColor = colors[GREEN];
            for (int colorInd = 0; colorInd < 3; ++colorInd)
            {
              boxColor.val[colorInd] *= ratioMaxSubdiv;
            }
            // Draw the box (darker green means less weight)
            if (out)
            {
              cvRectangle(out, roiP1, roiP2, boxColor, 1);
            }
          }
          else
          {
            // Draw a red box around this subdivision since it is not used
            if (out)
            {
              cvRectangle(out, roiP1, roiP2, colors[RED], 1 );
            }
          }
        }
        else
        {
          // Get ratio to max subdivision
          float ratioMaxSubdiv = *subdivsCur / maxVal;
          // Get weight into histogram of track window
          float weightVal = ratioMaxSubdiv * ratioMaxSubdiv;

          // DEBUG weights
          //printf("w %d: %f\t", j, weightVal);

          // Get color of surrounding box
          CvScalar boxColor = colors[GREEN];
          for (int colorInd = 0; colorInd < 3; ++colorInd)
          {
            boxColor.val[colorInd] *= ratioMaxSubdiv;
          }
          // Draw the box (darker green means less weight)
          if (out)
          {
            cvRectangle(out, roiP1, roiP2, boxColor, 1);
          }

          // Weight this subdivision into the histogram for the track window
          CvRect thisSubdivRect = cvRect(roiP1.x, roiP1.y, sBox, sBox);
          cvSetImageROI(hue, thisSubdivRect);
          cvSetImageROI(mask, thisSubdivRect);
          cvCalcHist(&hue, m_histSubdiv, 0, mask);
          cvResetImageROI(hue);
          cvResetImageROI(mask);

          // Weight this into the track window histogram
          for (int binNum = 0; binNum < m_histDims; ++binNum)
          {
            float* thisBin = cvGetHistValue_1D(m_histTrackWnd, binNum);
            *thisBin *= (1.0f - weightVal);
            *thisBin += static_cast<float>(cvGetReal1D(m_histSubdiv->bins, binNum)) * weightVal;
          }
        }
      }
      // DEBUG weights
      //printf("\n");
    }
    // DEBUG weights
    //printf("\n");
  }

  // DEBUG histograms
  //printf("Wnd BEFORE WT\n");
  //for( int i = 0; i < m_histDims; i++ ) {
  //  float *bin = cvGetHistValue_1D( m_histTrackWnd, i );
  //  printf("%2d: %3.1f  ", i, *bin);
  //  if( 0 == (i+1) % 8 ) {
  //    printf("\n");
  //  }
  //}


  // Now scale track window histogram to tracking histogram scale
  float trackWndHistMaxVal;
  cvGetMinMaxHistValue(m_histTrackWnd, 0, &trackWndHistMaxVal, 0, 0);
  cvConvertScale(m_histTrackWnd->bins, m_histTrackWnd->bins,
                 trackWndHistMaxVal ? HIST_SCALE / trackWndHistMaxVal : 0., 0);

  // Use aging to weight track window histogram into tracking histogram
  float averageBin = 0;
  for (int binNum = 0; binNum < m_histDims; ++binNum)
  {
    float* thisBin = cvGetHistValue_1D(m_hist, binNum);
    *thisBin *= (1.0f - (m_ageRatio / 100.0f));
    *thisBin += static_cast<float>(cvGetReal1D(m_histTrackWnd->bins, binNum)) *
                (m_ageRatio / 100.0f);
    averageBin += *thisBin;
  }
  averageBin /= m_histDims;

  // DEBUG average bin
  //printf("Avg bin: %f.\n", averageBin);

  // See if this histogram is dying
  //if( averageBin < SMALLEST_AVG_HIST_BIN ) {
  //    cvConvertScale( m_hist->bins, m_hist->bins, SMALLEST_AVG_HIST_BIN / averageBin, 0 );

  // DEBUG averageBin
  //printf("Hist saved for average bin: %f\n", averageBin);
  //}

  // DEBUG track hist
  //printf("Track\n");
  //for( int i = 0; i < m_histDims; i++ ) {
  //    float *bin = cvGetHistValue_1D( m_hist, i );
  //    printf("%2d: %3.1f  ", i, *bin);
  //    if( 0 == (i+1) % 8 ) {
  //        printf("\n");
  //    }
  //}
  //printf("\n");

  // Now compute histogram image
  cvZero(m_histImg);
  for (int i = 0; i < m_histDims; ++i)
  {
    const double raw = cvGetReal1D(m_hist->bins, i) * (m_histImg->height / HIST_SCALE);
    const int val = cvRound(raw);
    CvScalar color = hsv2rgb((i * m_histRanges[1]) / m_histDims);
    cvRectangle(m_histImg,
                cvPoint(i * m_binWidth, m_histImg->height),
                cvPoint((i + 1) * m_binWidth, m_histImg->height - val),
                color, -1, 8, 0);
  }

  // Show histogram
  if (m_showHistogram)
  {
    ShowHistogram();
  }
}
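The aging step above is an exponential moving average over histogram bins: with a = m_ageRatio/100, each bin becomes bin = (1 - a)*bin + a*newBin. A standalone sketch of that update (names are illustrative, not from the listing):

// Exponentially age one histogram toward another; 'ratio' plays the role
// of m_ageRatio / 100.0f in the example above.
void AgeHistogram(float* hist, const float* update, int dims, float ratio)
{
  for (int i = 0; i < dims; ++i)
  {
    hist[i] = hist[i] * (1.0f - ratio) + update[i] * ratio;
  }
}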
Example #8
int
main (int argc, char **argv)
{
  //name of the video file to load
  char* filename;
  char* imagename;
  if (argc == 2){
    filename = argv[1];
  }else{
    usage();
    return -1;
  }

    printf ("########### #############\n"
      "video_test, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tp - play/stop\n"
            "\ts - save current frame as jpg\n"
            "\tother key - next frame\n"
            "\n" );

  double w = 320, h = 240;
  int c;

  //if the specified AVI file cannot be found
  if(NULL==(capture = cvCaptureFromAVI(filename))){
    fprintf(stderr,"指定のaviファイル %s が見つかりませんでした.", filename);
    return -1;
  }

  // set the capture size
  cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, w);
  cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, h);

  cvNamedWindow ("Capture", CV_WINDOW_AUTOSIZE);

  //attach a trackbar (slider) to the window
  int frames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
  if(frames != 0){
    cvCreateTrackbar("Position", "Capture", &slider_position, frames, onTrackBarSlide);
  }

  int play = 0;
  // capture frames from the video
  while (1) {
    frame = cvQueryFrame (capture);

    slider_position = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
    cvSetTrackbarPos("Position", "Capture", slider_position);

    cvShowImage ("Capture", frame);

    if(play){
      c = cvWaitKey (2);
    }else{
      c = cvWaitKey (0);
    }

    // exit on ESC
    if (c == '\x1b')
      break;
 
    // handle the various key inputs
    switch( (char) c ) {
      case 'p':
          play ^= 1;
          break;
      case 's':
          //printf("%d\n", num);
          // ↓なぜかBus error
          //sprintf(imagename, "image_%d.jpg", num);

          imagename = "image.jpg";
          cvSaveImage(imagename, frame);
          printf("Current frame is saved as %s\n", imagename); 
          break;
      default:
          ;
    }
  }

  cvReleaseCapture (&capture);
  cvDestroyWindow ("Capture");

  return 0;
}
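As in Example 1, the trackbar callback is omitted from the listing; presumably it seeks the capture to the selected frame. A hedged sketch using the listing's global names:

CvCapture *capture = NULL;
IplImage *frame = NULL;
int slider_position = 0;

// Hypothetical callback: jump playback to the frame chosen on the trackbar
void onTrackBarSlide (int pos)
{
  cvSetCaptureProperty (capture, CV_CAP_PROP_POS_FRAMES, pos);
}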
Example #9
void cv::setTrackbarPos( const String& trackbarName, const String& winName, int value )
{
    CV_TRACE_FUNCTION();
    cvSetTrackbarPos(trackbarName.c_str(), winName.c_str(), value );
}
Example #10
void BinarizationViewer::showBinarizedImgs() {
    
    Mat srcBGRImg, srcHSVImg, srcYCrCbImg;
    Mat bgrChannelImgs[3], hsvChannelImgs[3], ycrcbChannelImgs[3];

    vector<string> channelNames = {};
    int trackbarInitValue = 128;
    namedWindow("Blue", CV_WINDOW_AUTOSIZE);
    namedWindow("Green", CV_WINDOW_AUTOSIZE);
    namedWindow("Red", CV_WINDOW_AUTOSIZE);
    namedWindow("Hue", CV_WINDOW_AUTOSIZE);
    namedWindow("Saturation", CV_WINDOW_AUTOSIZE);
    namedWindow("Value", CV_WINDOW_AUTOSIZE);
    namedWindow("Y", CV_WINDOW_AUTOSIZE);
    namedWindow("Cr", CV_WINDOW_AUTOSIZE);
    namedWindow("Cb", CV_WINDOW_AUTOSIZE);

    cvCreateTrackbar("B_Threshold", "Blue", &trackbarInitValue, 255, onBlueTrackbar);
    cvCreateTrackbar("G_Threshold", "Green", &trackbarInitValue, 255, onGreenTrackbar);
    cvCreateTrackbar("R_Threshold", "Red", &trackbarInitValue, 255, onRedTrackbar);
    cvCreateTrackbar("H_Threshold", "Hue", &trackbarInitValue, 255, onHueTrackbar);
    cvCreateTrackbar("S_Threshold", "Saturation", &trackbarInitValue, 255, onSaturationTrackbar);
    cvCreateTrackbar("V_Threshold", "Value", &trackbarInitValue, 255, onValueTrackbar);
    cvCreateTrackbar("Y_Threshold", "Y", &trackbarInitValue, 255, onYTrackbar);
    cvCreateTrackbar("Cr_Threshold", "Cr", &trackbarInitValue, 255, onCrTrackbar);
    cvCreateTrackbar("Cb_Threshold", "Cb", &trackbarInitValue, 255, onCbTrackbar);

    cvSetTrackbarPos("B_Threshold", "Blue", 128);
    cvSetTrackbarPos("G_Threshold", "Green", 128);
    cvSetTrackbarPos("R_Threshold", "Red", 128);
    cvSetTrackbarPos("H_Threshold", "Hue", 128);
    cvSetTrackbarPos("S_Threshold", "Saturation", 128);
    cvSetTrackbarPos("V_Threshold", "Value", 128);
    cvSetTrackbarPos("Y_Threshold", "Y", 128);
    cvSetTrackbarPos("Cr_Threshold", "Cr", 128);
    cvSetTrackbarPos("Cb_Threshold", "Cb", 128);

    _isShowing = true;
    while(_isShowing) {
        srcBGRImg = _cameraManager.getFrame();

        cvtColor(srcBGRImg, srcHSVImg, CV_BGR2HSV);
        cvtColor(srcBGRImg, srcYCrCbImg, CV_BGR2YCrCb);

        split(srcBGRImg, bgrChannelImgs);
        split(srcHSVImg, hsvChannelImgs);
        split(srcYCrCbImg, ycrcbChannelImgs);

        threshold(bgrChannelImgs[0], bgrChannelImgs[0], binarizationViewerBlueThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[1], bgrChannelImgs[1], binarizationViewerGgreenThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[2], bgrChannelImgs[2], binarizationViewerRedThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[0], hsvChannelImgs[0], binarizationViewerHueThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[1], hsvChannelImgs[1], binarizationViewerSaturationThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[2], hsvChannelImgs[2], binarizationViewerValueThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[0], ycrcbChannelImgs[0], binarizationViewerYThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[1], ycrcbChannelImgs[1], binarizationViewerCrThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[2], ycrcbChannelImgs[2], binarizationViewerCbThreshold, 255, CV_THRESH_BINARY);
        
        imshow("src", srcBGRImg);
        imshow("Blue", bgrChannelImgs[0]);
        imshow("Green", bgrChannelImgs[1]);
        imshow("Red", bgrChannelImgs[2]);
        imshow("Hue", hsvChannelImgs[0]);
        imshow("Saturation", hsvChannelImgs[1]);
        imshow("Value", hsvChannelImgs[2]);
        imshow("Y", ycrcbChannelImgs[0]);
        imshow("Cr", ycrcbChannelImgs[1]);
        imshow("Cb", ycrcbChannelImgs[2]);

        int key = waitKey(1);
        if(key == 27) break;
    }

    destroyAllWindows();
}
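The nine trackbar callbacks are not shown; each presumably just stores the slider position in the matching global threshold (note that all nine trackbars share the same bound variable in the listing, so the real work happens in the callbacks). One representative sketch (the global's name is taken from the threshold() calls above; the body is an assumption):

int binarizationViewerBlueThreshold = 128;

// Hypothetical legacy-style callback for the "Blue" window's trackbar
void onBlueTrackbar(int pos) {
    binarizationViewerBlueThreshold = pos;
}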
Example #11
/*
 * -d 0,1,2,3,4 => /dev/video0, /dev/video1, ...
 * -f <file>    => use a static image file instead of a camera
 * 
 */
int main (int argc, char * argv[]) {
	//Two boolean variables
	char quit = 0; //Exit main program loop?
	char grab_frame = 1; //Do we grab a new frame from the camera?
	
	int thresh1=DEFAULT_TRACKBAR_VAL, thresh2=DEFAULT_TRACKBAR_VAL; //These two variables will hold trackbar positions.

	if(argc==1){
		camID=0;
		printf("set default camera 0\n");
	}else if(option(argc,argv) < 0 ){
		printf( "Wrong args!!!\n");
		exit(EXIT_FAILURE);
	}

	//These are pointers to IPL images, which will hold the result of our calculations
	IplImage *small_image = NULL; /*cvCreateImage(cvSize(IMG_WIDTH,IMG_HEIGHT),IPL_DEPTH_8U,3)*/; //size, depth, channels (RGB = 3)
	IplImage *small_grey_image = NULL;/*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //1 channel for greyscale
	IplImage *edge_image = NULL; /*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //We use cvGetSize to make sure the images are the same size. 
	
	//CvMemStorage and CvSeq are structures used for dynamic data collection. CvMemStorage contains pointers to the actual
	//allocated memory, but CvSeq is used to access this data. Here, it will hold the list of image contours.
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = 0;
	
	CvCapture *camera=NULL;
	if(camID>=0){
		camera = cvCreateCameraCapture(camID); //This function tries to connect to the first (0th) camera it finds and creates a structure to refer to it.
		if(!camera){ //cvCreateCameraCapture failed, most likely there is no camera connected.
			printf("Could not find a camera to capture from...\n"); //Notify the user...
			return -1; //And quit with an error.
		}
	}
	cvNamedWindow(MAIN_WINNAME, CV_WINDOW_AUTOSIZE); //Here we create a window and give it a name. The second argument tells the window to not automatically adjust its size.
	
	//We add two trackbars (sliders) to the window. These will be used to set the parameters for the Canny edge detection.
	cvCreateTrackbar("Thresh1", MAIN_WINNAME, &thresh1, 256, 0);
	cvCreateTrackbar("Thresh2", MAIN_WINNAME, &thresh2, 256, 0);
	
	//Set the trackbar position to the default value. 
	cvSetTrackbarPos("Thresh1", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL); //Trackbar name, window name, position
	cvSetTrackbarPos("Thresh2", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL);
	
	//Now set the mouse callback function. We need to pass the location of the contours so that it will be able to access this information.
	cvSetMouseCallback(MAIN_WINNAME, onMouse, &contours); //Window name, function pointer, user-defined parameters.
	
	IplImage *frame=NULL; //This will point to the IPL image we will retrieve from the camera.
	IplImage *frame_org=NULL;
	if(camID < 0){
		frame_org = cvLoadImage(orgFile, CV_LOAD_IMAGE_UNCHANGED);
		frame = cvLoadImage(inFile, CV_LOAD_IMAGE_UNCHANGED);
	}
	//This is the main program loop. We exit the loop when the user sets quit to true by pressing the "esc" key
	while(!quit){
		int c =0;
		if(camID >= 0){
			c=cvWaitKey(30); //Wait 30 ms for the user to press a key.
			
			//Respond to key pressed.
			switch(c){
				case 32: //Space
					grab_frame = !grab_frame; //Reverse the value of grab_frame. That way, the user can toggle by pressing the space bar.
					break;
				case 27: //Esc: quit application when user presses the 'esc' key.
					quit = 1; //Get out of loop
					break;
			};

			//If we don't have to grab a frame, we're done for this pass.
			if(!grab_frame)continue;	

			//Grab a frame from the camera.
			frame = cvQueryFrame(camera);
			
			if(!frame) continue; //Couldn't get an image, try again next time.
		}
		
		//In computer vision, it's always better to work with the smallest images possible, for faster performance.
		//cvResize will use inter-linear interpolation to fit frame into small_image.
		if(small_image==NULL){
			small_image = cvCreateImage(cvSize(frame->width,frame->height),IPL_DEPTH_8U,3); //size, depth, channels (RGB = 3)
			printf("w=%d, h=%d\n", frame->width,frame->height);
		}
		if(small_grey_image==NULL) small_grey_image=cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);
		if(edge_image==NULL) edge_image = cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);

		cvResize(frame, small_image, CV_INTER_LINEAR);
		
		//Many computer vision algorithms do not use colour information. Here, we convert from RGB to greyscale before processing further.
		cvCvtColor(small_image, small_grey_image, CV_RGB2GRAY);

		//We then detect edges in the image using the Canny algorithm. This will return a binary image, one where the pixel values will be 255 for 
		//pixels that are edges and 0 otherwise. This is unlike other edge detection algorithms like Sobel, which compute greyscale levels.
		cvCanny(small_grey_image, edge_image, (double)thresh1, (double)thresh2, 3); //We use the threshold values from the trackbars and set the window size to 3
		
		//The edges returned by the Canny algorithm might have small holes in them, which will cause some problems during contour detection.
		//The simplest way to solve this problem is to "dilate" the image. This is a morphological operator that will set any pixel in a binary image to 255 (on) 
		//if it has one neighbour that is not 0. The result of this is that edges grow fatter and small holes are filled in.
		//We re-use small_grey_image to store the results, as we won't need it anymore.
		cvDilate(edge_image, small_grey_image, 0, 1);
		
		//Once we have a binary image, we can look for contours in it. cvFindContours will scan through the image and store connected contours in "storage".
		//"contours" will point to the first contour detected. CV_RETR_TREE means that the function will create a contour hierarchy. Each contour will contain 
		//a pointer to contours that are contained inside it (holes). CV_CHAIN_APPROX_NONE means that all the contours points will be stored. Finally, an offset
		//value can be specified, but we set it to (0,0).
		cvFindContours(small_grey_image, storage, &contours, sizeof(CvContour), CV_RETR_TREE, 
					   CV_CHAIN_APPROX_NONE, cvPoint(0,0));
		CvContour *max_contour = LargestContour(&contours);
		//This function will display contours on top of an image. We can specify different colours depending on whether the contour in a hole or not.
		cvDrawContours(small_image, contours, CV_RGB(255,0,0), CV_RGB(0,255,0), MAX_CONTOUR_LEVELS, 
					   1, CV_AA, cvPoint(0,0));
		
		//Finally, display the image in the window.
		cvShowImage(MAIN_WINNAME, small_image);
		if(camID< 0){
			//draw the max contour on the original
			cvDrawContours(frame_org, (CvSeq *)max_contour, CV_RGB(0, 255,0), CV_RGB(0,200,0), 
						0, 1, CV_AA, cvPoint(roi_x,roi_y));		
			cvShowImage(orgFile, frame_org);
			c=cvWaitKey(0); //Wait indefinitely for the user to press a key.
			//Respond to key pressed.
			if(c == 27) quit = 1; //Get out of loop
		}
	}
	
	//Clean up before quitting.
	cvDestroyAllWindows(); //This function releases all the windows created so far.
	cvReleaseCapture(&camera); //Release the camera capture structure.
	
	//release memory
	cvReleaseMemStorage(&storage);
	
	//Release images
	cvReleaseImage(&small_image); //We pass a pointer to a pointer, because cvReleaseImage will set the image pointer to 0 for us.
	cvReleaseImage(&small_grey_image);
	cvReleaseImage(&edge_image);
	if( (camID < 0) && frame ){
		cvReleaseImage(&frame);
	}
	return 0; //We're done.
}
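LargestContour() is not included in the listing. A plausible sketch that walks the top-level contour list with cvContourArea (OpenCV 2.x C API signature; the real helper may differ):

#include <math.h>

//Hypothetical helper: return the contour enclosing the largest absolute area.
CvContour *LargestContour(CvSeq **contours) {
	CvContour *best = NULL;
	double best_area = 0.0;
	CvSeq *c;
	for(c = *contours; c != NULL; c = c->h_next){
		double area = fabs(cvContourArea(c, CV_WHOLE_SEQ, 0));
		if(area > best_area){
			best_area = area;
			best = (CvContour *)c;
		}
	}
	return best;
}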
Example #12
/*
 * Updates trackbar positions for variables that can be changed by the software
 *
 */
void UpdateGUI(Experiment* exp) {

		cvSetTrackbarPos("DLPFlashOn", exp->WinCon1, (exp->Params->DLPOnFlash));
		cvSetTrackbarPos("DLPOn", exp->WinCon1, (exp->Params->DLPOn));

		/** Illumination Controls **/
		cvSetTrackbarPos("x", exp->WinCon1, (exp->Params->IllumSquareOrig.x));
		cvSetTrackbarPos("y", exp->WinCon1, (exp->Params->IllumSquareOrig.y));
		cvSetTrackbarPos("xRad", exp->WinCon1, (exp->Params->IllumSquareRad.width));
		cvSetTrackbarPos("yRad", exp->WinCon1, (exp->Params->IllumSquareRad.height));

		cvSetTrackbarPos("IllumSweepHT", exp->WinCon1, (exp->Params->IllumSweepHT));
		cvSetTrackbarPos("IllumSweepOn", exp->WinCon1, (exp->Params->IllumSweepOn));


		/** Threshold **/
		cvSetTrackbarPos("Threshold", exp->WinCon1, (exp->Params->BinThresh));
		cvSetTrackbarPos("Gauss=x*2+1",exp->WinCon1, exp->Params->GaussSize);

		/** Updated Temporal IQ **/
		/** Temporal Coding **/
		cvSetTrackbarPos("TemporalIQ", exp->WinCon1, (exp->Params->TemporalOn));


		cvSetTrackbarPos("IllumDuration", exp->WinCon1,
				(exp->Params->IllumDuration));


		/** Protocol Stuff **/
		/** If we have loaded a protocol, update protocol specific sliders **/
		if (exp->pflag) {
			cvSetTrackbarPos("Protocol", exp->WinCon2, exp->Params->ProtocolUse);
			cvSetTrackbarPos("ProtoStep", exp->WinCon2,
					(exp->Params->ProtocolStep));
		}

		/** Floodlight **/
		cvSetTrackbarPos("FloodLight", exp->WinCon2,exp->Params->IllumFloodEverything);


		/** Record **/
		cvSetTrackbarPos("RecordOn", exp->WinCon1, (exp->Params->Record));

		/** Record **/
		cvSetTrackbarPos("On", exp->WinCon1, (exp->Params->OnOff));

		/**Stage Speed **/
		cvSetTrackbarPos("StageSpeed",exp->WinCon1,(exp->Params->stageSpeedFactor));

	return;

}
Example #13
// Runs the dot detector and sends detected dots to the server on the given port. TODO: Implement headless mode; needs more config options and/or possibly a config file first, though.
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and socket descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour-based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, and paints them on the images shown to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter;
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 fps
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit the lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More than one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If the ESC key is pressed, the key code is 0x10001B under OpenCV 0.9.7 (Linux version),
        //so remove the higher bits using the AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles the noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping
    cvReleaseImage( &grabbedImage );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If warp is off, the window was never created
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
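timeval_subtract() is the one timing helper the listing assumes but does not define. A sketch in the classic glibc style (assumed; the project's own version may differ):

#include <sys/time.h>

/* Hypothetical helper: *result = *x - *y. Returns 1 if the difference is negative. */
int timeval_subtract( struct timeval *result, struct timeval *x, struct timeval *y ) {
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    if( result->tv_usec < 0 ) {
        result->tv_sec  -= 1;
        result->tv_usec += 1000000;
    }
    return result->tv_sec < 0;
}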