Example #1
0
void TestAimer::Run() {
	NetworkTable *table = NetworkTable::GetTable("VisionTable");
	
	float high = table->GetNumber("high_distance");
	float test = table->GetNumber("test_distance");
	
	SmartDashboard::PutNumber("high distance", high/12);
	SmartDashboard::PutNumber("test distance", test/12);
}
Example #2
0
// Called just before this Command runs the first time
void DetectTargetCommand::Initialize() {
	NetworkTable *table = NetworkTable::GetTable("datatable");
	NumberArray apRatioArray;
	NumberArray areaArray;
	calcComplete = false;
	//Wait(0.1);
	table->RetrieveValue("AP_RATIO", apRatioArray);
	table->RetrieveValue("AREA", areaArray);
	//Wait(0.1);
	calcComplete = Robot::camera->isHot(areaArray, apRatioArray);
}
Example #3
0
/**
 * Maps the specified key to the specified value in this table.
 * The key can not be NULL.
 * The value can be retrieved by calling the get method with a key that is equal to the original key.
 * @param keyName the key
 * @param value the value
 */
void SmartDashboard::PutData(const char *keyName, SmartDashboardData *value)
{
    if (keyName == NULL)
    {
        wpi_setWPIErrorWithContext(NullParameter, "keyName");
        return;
    }
    if (value == NULL)
    {
        wpi_setWPIErrorWithContext(NullParameter, "value");
        return;
    }
    NetworkTable *type = new NetworkTable();
    type->PutString("~TYPE~", value->GetType());
    type->PutSubTable("Data", value->GetTable());
    m_table->PutSubTable(keyName, type);
    m_tablesToData[type] = value;
}
Example #4
0
// Reads turret-tracking values from the "987Table" network table.
// NOTE(review): every live statement below discards its result -- the
// assignments to ClickNumber/AngleX/TargetX are commented out, so this
// function currently performs the network reads without storing anything.
// It appears to have been left in this state during debugging; confirm
// intent before relying on it.
void TurretClass::GetData()
{
	NetworkTable * tbl = NetworkTable::GetTable("987Table");
	if (tbl != NULL)
	{
		//tbl->BeginTransaction();
		// Sequence-number read; the returned value is unused.
		tbl->GetInt("987Seq");
		// The /1000 scaling suggests the table stores values in thousandths,
		// but both computed floats below are no-op expression statements.
		((float)tbl->GetInt("987Angle"))/1000;
		((float)tbl->GetInt("987ClickX"))/1000;
	//	ClickNumber = tbl->GetInt("987Seq");
	//	AngleX = ((float)tbl->GetInt("987Angle"))/1000;
	//	TargetX = ((float)tbl->GetInt("987ClickX"))/1000;
		//tbl->EndTransaction();
	}
	else
	{
		// Fallback defaults when the table is unavailable (also disabled).
	//	TargetX = 7;
	//	AngleX = 99;
	//	ClickNumber = 314;
	}
}
Example #5
0
/**
 * Puts the given value into the given key position
 * @param key the key
 * @param value the value
 */
void Preferences::Put(const char *key, std::string value) {
  std::unique_lock<priority_recursive_mutex> sync(m_tableLock);
  if (key == nullptr) {
    wpi_setWPIErrorWithContext(NullParameter, "key");
    return;
  }

  if (std::string(key).find_first_of("=\n\r \t\"") != std::string::npos) {
    wpi_setWPIErrorWithContext(ParameterOutOfRange,
                               "key contains illegal characters");
    return;
  }

  std::pair<StringMap::iterator, bool> ret =
      m_values.insert(StringMap::value_type(key, value));
  if (ret.second)
    m_keys.push_back(key);
  else
    ret.first->second = value;

  NetworkTable* table = NetworkTable::GetTable(kTableName);
  table->PutString(key, value);
}
Example #6
0
// Bin-detection vision client: grabs frames from the configured media
// source, runs a cascade classifier to find recycling bins, tracks them
// across frames, and publishes results over NetworkTables. In interactive
// (non-batch) mode it also drives an OpenCV GUI with live tuning trackbars
// and single-key commands.
// Returns 0 on normal exit, -1 if the classifier fails to (re)load,
// -2 on bad command-line arguments.
int main( int argc, const char** argv )
{
	// Flags for various UI features
	bool pause = false;       // pause playback?
	bool printFrames = false; // print frame number?
	int frameDisplayFrequency = 1;
   
	// Read through command line args, extract
	// cmd line parameters and input filename
	Args args;

	if (!args.processArgs(argc, argv))
		return -2;

	string windowName = "Bin detection"; // GUI window name
	string capPath; // Output directory for captured images
	MediaIn* cap;   // NOTE(review): ownership unclear -- never freed before return; confirm openMedia's contract
	openMedia(args.inputName, cap, capPath, windowName, !args.batchMode);

	if (!args.batchMode)
		namedWindow(windowName, WINDOW_AUTOSIZE);

	// Seek to start frame if necessary
	if (args.frameStart > 0)
		cap->frameCounter(args.frameStart);

	Mat frame;

	// Minimum size of a bin at ~30 feet distance
	// TODO : Verify this once camera is calibrated
	if (args.ds)	
	   minDetectSize = cap->width() * 0.07;
	else
	   minDetectSize = cap->width() * 0.195;

	// If UI is up, pop up the parameters window
	if (!args.batchMode)
	{
		string detectWindowName = "Detection Parameters";
		namedWindow(detectWindowName);
		createTrackbar ("Scale", detectWindowName, &scale, 50, NULL);
		createTrackbar ("Neighbors", detectWindowName, &neighbors, 50, NULL);
		createTrackbar ("Min Detect", detectWindowName, &minDetectSize, 200, NULL);
		createTrackbar ("Max Detect", detectWindowName, &maxDetectSize, max(cap->width(), cap->height()), NULL);
	}

	// Create list of tracked objects
	// recycling bins are 24" wide
	TrackedObjectList binTrackingList(24.0, cap->width());

	NetworkTable::SetClientMode();
	NetworkTable::SetIPAddress("10.9.0.2"); 
	NetworkTable *netTable = NetworkTable::GetTable("VisionTable");
	const size_t netTableArraySize = 7; // 7 bins?
	NumberArray netTableArray;

	// 7 bins max, 3 entries each (confidence, distance, angle)
	netTableArray.setSize(netTableArraySize * 3);

	// Code to write video frames to avi file on disk
	VideoWriter outputVideo;
	VideoWriter markedupVideo;
	args.writeVideo = netTable->GetBoolean("WriteVideo", args.writeVideo);
	const int videoWritePollFrequency = 30; // check for network table entry every this many frames (~5 seconds or so)
	int videoWritePollCount = videoWritePollFrequency;

	FrameTicker frameTicker;

	DetectState detectState(
		  ClassifierIO(args.classifierBaseDir, args.classifierDirNum, args.classifierStageNum), 
		  gpu::getCudaEnabledDeviceCount() > 0);
	// Start of the main loop
	//  -- grab a frame
	//  -- update the angle of tracked objects 
	//  -- do a cascade detect on the current frame
	//  -- add those newly detected objects to the list of tracked objects
	while(cap->getNextFrame(frame, pause))
	{
		frameTicker.start(); // start time for this frame
		if (--videoWritePollCount == 0)
		{
			args.writeVideo = netTable->GetBoolean("WriteVideo", args.writeVideo);
			videoWritePollCount = videoWritePollFrequency;
		}

		if (args.writeVideo)
		   writeVideoToFile(outputVideo, getVideoOutName().c_str(), frame, netTable, true);

		//TODO : grab angle delta from robot
		// Adjust the position of all of the detected objects
		// to account for movement of the robot between frames
		double deltaAngle = 0.0;
		binTrackingList.adjustAngle(deltaAngle);

		// This code will load a classifier if none is loaded - this handles
		// initializing the classifier the first time through the loop.
		// It also handles cases where the user changes the classifer
		// being used - this forces a reload
		// Finally, it allows a switch between CPU and GPU on the fly
		if (detectState.update() == false)
			return -1;

		// Apply the classifier to the frame
		// detectRects is a vector of rectangles, one for each detected object
		vector<Rect> detectRects;
		detectState.detector()->Detect(frame, detectRects); 
		checkDuplicate(detectRects);

		// If args.captureAll is enabled, write each detected rectangle
		// to their own output image file. Do it before anything else
		// so there's nothing else drawn to frame yet, just the raw
		// input image
		if (args.captureAll)
			for (size_t index = 0; index < detectRects.size(); index++)
				writeImage(frame, detectRects, index, capPath.c_str(), cap->frameCounter());

		// Draw detected rectangles on frame
		if (!args.batchMode && args.rects && ((cap->frameCounter() % frameDisplayFrequency) == 0))
			drawRects(frame,detectRects);

		// Process this detected rectangle - either update the nearest
		// object or add it as a new one
		for(vector<Rect>::const_iterator it = detectRects.begin(); it != detectRects.end(); ++it)
			binTrackingList.processDetect(*it);
		#if 0
		// Print detect status of live objects
		if (args.tracking)
			binTrackingList.print();
		#endif
		// Grab info from trackedobjects. Display it and update network tables
		vector<TrackedObjectDisplay> displayList;
		binTrackingList.getDisplay(displayList);

		// Draw tracking info on display if 
		//   a. tracking is toggled on
		//   b. batch (non-GUI) mode isn't active
		//   c. we're on one of the frames to display (every frDispFreq frames)
		if (args.tracking && !args.batchMode && ((cap->frameCounter() % frameDisplayFrequency) == 0))
		    drawTrackingInfo(frame, displayList);

		if (!args.ds)
		{
		   // Clear out network table array
		   // (the !args.ds guard that used to be in this condition was
		   // redundant -- we're already inside if (!args.ds) -- and was
		   // written with bitwise & instead of &&)
		   for (size_t i = 0; i < (netTableArraySize * 3); i++)
			   netTableArray.set(i, -1);

		   for (size_t i = 0; i < min(displayList.size(), netTableArraySize); i++)
		   {
			  netTableArray.set(i*3,   displayList[i].ratio);
			  netTableArray.set(i*3+1, displayList[i].distance);
			  netTableArray.set(i*3+2, displayList[i].angle);
		   }
		   netTable->PutValue("VisionArray", netTableArray);
		}

		// Don't update to next frame if paused to prevent
		// objects missing from this frame to be aged out
		// as the current frame is redisplayed over and over
		if (!pause)
			binTrackingList.nextFrame();

		// For interactive mode, update the FPS as soon as we have
		// a complete array of frame time entries
		// For args.batch mode, only update every frameTicksLength frames to
		// avoid printing too much stuff
	    if (frameTicker.valid() &&
			( (!args.batchMode && ((cap->frameCounter() % frameDisplayFrequency) == 0)) || 
			  ( args.batchMode && ((cap->frameCounter() % 50) == 0))))
	    {
			stringstream ss;
			// If in args.batch mode and reading a video, display
			// the frame count
			int frames = cap->frameCount();
			if (args.batchMode && (frames > 0))
			{
				// frames > 0 is guaranteed by the condition above
				ss << cap->frameCounter() << '/' << frames << " : ";
			}
			// Print the FPS
			ss << fixed << setprecision(2) << frameTicker.getFPS() << "FPS";
			if (!args.batchMode)
				putText(frame, ss.str(), Point(frame.cols - 15 * ss.str().length(), 50), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,0,255));
			else
				cout << ss.str() << endl;
	    }

		// Driverstation Code
		if (args.ds)
		{
			// Report boolean value for each bin on the step
			bool hits[4];
			for (int i = 0; i < 4; i++)
			{
				Rect dsRect(i * frame.cols / 4, 0, frame.cols/4, frame.rows);
				if (!args.batchMode && ((cap->frameCounter() % frameDisplayFrequency) == 0))
					rectangle(frame, dsRect, Scalar(0,255,255,3));
				hits[i] = false;
				// For each quadrant of the field, look for a detected
				// rectangle contained entirely in the quadrant
				// Assume that if that's found, it is a bin
				// TODO : Tune this later with a distance range
				for (vector<TrackedObjectDisplay>::const_iterator it = displayList.begin(); it != displayList.end(); ++it)
				{
					if (((it->rect & dsRect) == it->rect) && (it->ratio > 0.15))
					{
						if (!args.batchMode && ((cap->frameCounter() % frameDisplayFrequency) == 0))
							rectangle(frame, it->rect, Scalar(255,128,128), 3);
						hits[i] = true;
					}
				}
				writeNetTableBoolean(netTable, "Bin", i + 1, hits[i]);
			}
		}

		// Various random display updates. Only do them every frameDisplayFrequency
		// frames. Normally this value is 1 so we display every frame. When exporting
		// X over a network, though, we can speed up processing by only displaying every
		// 3, 5 or whatever frames instead.
		if (!args.batchMode && ((cap->frameCounter() % frameDisplayFrequency) == 0))
		{
			// Put an A on the screen if capture-all is enabled so
			// users can keep track of that toggle's mode
			if (args.captureAll)
				putText(frame, "A", Point(25,25), FONT_HERSHEY_PLAIN, 2.5, Scalar(0, 255, 255));

			// Print frame number of video if the option is enabled
			int frames = cap->frameCount();
			if (printFrames && (frames > 0))
			{
				stringstream ss;
				ss << cap->frameCounter() << '/' << frames;
				putText(frame, ss.str(), 
				        Point(frame.cols - 15 * ss.str().length(), 20), 
						FONT_HERSHEY_PLAIN, 1.5, Scalar(0,0,255));
			}

			// Display current classifier under test
			putText(frame, detectState.print(), 
			        Point(0, frame.rows - 30), FONT_HERSHEY_PLAIN, 
					1.5, Scalar(0,0,255));

			// Display crosshairs so we can line up the camera
			if (args.calibrate)
			{
			   line (frame, Point(frame.cols/2, 0) , Point(frame.cols/2, frame.rows), Scalar(255,255,0));
			   line (frame, Point(0, frame.rows/2) , Point(frame.cols, frame.rows/2), Scalar(255,255,0));
			}
			
			// Main call to display output for this frame after all
			// info has been written on it.
			imshow( windowName, frame );

			// If saveVideo is set, write the marked-up frame to a file
			if (args.saveVideo)
			   writeVideoToFile(markedupVideo, getVideoOutName(false).c_str(), frame, netTable, false);

			char c = waitKey(5);
			if ((c == 'c') || (c == 'q') || (c == 27)) 
			{ // exit
				if (netTable->IsConnected())
					NetworkTable::Shutdown();
				return 0;
			} 
			else if( c == ' ') { pause = !pause; }
			else if( c == 'f')  // advance to next frame
			{
				cap->getNextFrame(frame, false);
			}
			else if (c == 'A') // toggle capture-all
			{
				args.captureAll = !args.captureAll;
			}
			else if (c == 't') // toggle args.tracking info display
			{
				args.tracking = !args.tracking;
			}
			else if (c == 'r') // toggle args.rects info display
			{
				args.rects = !args.rects;
			}
			else if (c == 'a') // save all detected images
			{
				// Save from a copy rather than the original
				// so all the markup isn't saved, only the raw image
				Mat frameCopy;
				cap->getNextFrame(frameCopy, true);
				for (size_t index = 0; index < detectRects.size(); index++)
					writeImage(frameCopy, detectRects, index, capPath.c_str(), cap->frameCounter());
			}
			else if (c == 'p') // print frame number to console
			{
				cout << cap->frameCounter() << endl;
			}
			else if (c == 'P') // Toggle frame # printing to display
			{
				printFrames = !printFrames;
			}
			else if (c == 'S')
			{
				frameDisplayFrequency += 1;
			}
			else if (c == 's')
			{
				frameDisplayFrequency = max(1, frameDisplayFrequency - 1);
			}
			else if (c == 'G') // toggle CPU/GPU mode
			{
				detectState.toggleGPU();
			}
			else if (c == '.') // higher classifier stage
			{
				detectState.changeSubModel(true);
			}
			else if (c == ',') // lower classifier stage
			{
				detectState.changeSubModel(false);
			}
			else if (c == '>') // higher classifier dir num
			{
				detectState.changeModel(true);
			}
			else if (c == '<') // lower classifier dir num
			{
				detectState.changeModel(false);
			}
			// cast to unsigned char: passing a negative char (possible since
			// waitKey's int return is truncated into c) to isdigit is UB
			else if (isdigit((unsigned char)c)) // save a single detected image
			{
				Mat frameCopy;
				cap->getNextFrame(frameCopy, true);
				writeImage(frameCopy, detectRects, c - '0', capPath.c_str(), cap->frameCounter());
			}
		}

		// Save frame time for the current frame
		frameTicker.end();

		// Skip over frames if needed - useful for batch extracting hard negatives
		// so we don't get negatives from every frame. Sequential frames will be
		// pretty similar so there will be lots of redundant images found
		if (args.skip > 0)
		   cap->frameCounter(cap->frameCounter() + args.skip - 1);
	}
	return 0;
}
// Hot-target vision client: captures frames from the Raspberry Pi camera,
// thresholds them, finds contours whose width/height ratio matches one of
// the two target shapes, and publishes "Hotness" (exactly two matching
// contours found) plus diagnostic images and values over NetworkTables.
// Returns 0 on normal exit, 1 on bad options, -1 if the camera fails to
// initialize.
int main(int argc, char **argv) {

  // setup Network tables for this client to talk to the robot
  NetworkTable::SetClientMode();
  NetworkTable::SetIPAddress("10.36.18.2"); // where is the robot?
  NetworkTable *table = NetworkTable::GetTable("SmartDashboard"); // what table will we interface with?
	
	cout << "Got through the network tables\n";
	
	int width = 320;
	int height = 240;
	
	const float SeekWidth[] = {23.50, 4.0}; // inches
	const float SeekHeight[] = {4.0, 32.0}; // inches
	const float SeekRatio[2] = {SeekWidth[0] / SeekHeight[0], SeekWidth[1] / SeekHeight[1]}; // 24:18 = 1.333:1

  int c;
  opterr = 0; // "I'll create all the error messages, not getopt()"
  bool ShowMask = true; // flag to make the masked image where 'target' is seen, otherwise displays raw source image
  bool ShowVideo = false;   
  while ((c = getopt (argc, argv, "b:mvs")) != -1)
    switch (c) {
      case 'b': {
        char* endptr;
        errno = 0;    /* To distinguish success/failure after call */
        long val = strtol(optarg, &endptr, 0);

        /* Check for various possible errors */
        if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
            || (errno != 0 && val == 0)
            || (val > 100) || (val < 0)) {
          fprintf(stderr, "Invalid integer for 'b'eta: '%s'\n",optarg);
          exit(EXIT_FAILURE);
        }
        type = val; // global threshold type consumed by threshold() below
        break;
      }
      case 'm':
        ShowMask = true;
        break;
      case 'v': // VGA resolution (640x480, but is slow)
        width = 640;
        height = 480;
        break;
      case 's': // Show the video
        ShowVideo = true;
        break;
      case '?':
        if (optopt == 'b')
          fprintf (stderr, "Option -%c requires an argument.\n", optopt);
        else if (isprint (optopt))
          fprintf (stderr, "Unknown option `-%c'.\n", optopt);
        else
          fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
        return 1;
      default:
        abort ();
    }

  cout << "Attempting to initialize capturing\n";

	RaspiVid v("/dev/video0", width, height);
	cout << "Calling constructor(s)\n";
	if (!v.initialize(RaspiVid::METHOD_MMAP)) {
		cout << "Unable to initialize!\n";
		return -1;
	}
	
	cout << "Successfully initialized!\n";
	
	v.setBrightness(50); // 10 for processing; 50 for visible image
	v.startCapturing();
	
	// Ring buffer of frame-start timestamps, used for the rolling FPS figure.
	long start[10];

  if(ShowVideo) {
    namedWindow("Vision",1);
    //createTrackbar("Thresh Type", "Vision", &type, 4, NULL); // callback not needed
	  //createTrackbar("Min Value", "Vision", &defMin, 255, NULL); // callback not needed
	  //createTrackbar("Max Value", "Vision", &defMax, 255, NULL); // callback not needed
	  createTrackbar("Percent Tall", "Vision", &defTall, 20, NULL); // callback not needed
	  createTrackbar("Percent Narr", "Vision", &defNarr, 20, NULL); // callback not needed
	} else {
	  table->PutNumber("Horizontal Percent Error", defTall);
	  table->PutNumber("Vertical Percent Error", defNarr);
	}
	
	for (int i = 0; i<10; i++)
		start[i] = getmsofday(); // pre-load with 'now'
	for (int i = 0; 1; i++) {
		// Receive key-press updates, it is required if you want to output images,
		// so the task takes a moment to update the display.
		if (waitKey(1) > 0)
			break;
		
    string fileStream = "Mask"; // Default if no table present
		if (table->IsConnected()) {
	    NetworkTable *StreamsTable = table->GetSubTable("File Streams");
	    if (StreamsTable && StreamsTable->ContainsKey("selected")) {
        fileStream = StreamsTable->GetString("selected");
      }
		}
		
		ShowMask = (fileStream == "Mask");
		
		// Grab a frame from the vision API
		VideoBuffer buffer = v.grabFrame();
		
		// Put the frame into an OpenCV image matrix with a single color (gray scale)
		Mat image(height, width, CV_8UC1, buffer.data(), false); // AKA 'Y'

    Mat dst; // this will be a RGB version of the source image
    
#if defined(YOU_WANT_RGB_COLOR_INSTEAD_OF_GREYSCALE)    
		// There is more data after the gray scale (Y) that contains U&V
		Mat cb(height/2, width/2, CV_8UC1, buffer.data()+(height*width), false); // 'U'
		Mat cr(height/2, width/2, CV_8UC1, buffer.data()+(height*width)*5/4, false); // 'V'

    // size up cb and cr to be same as y
    Mat CB;
    resize(cb,CB,cvSize(width,height));
    Mat CR;
    resize(cr,CR,cvSize(width,height));
    
    // empty image same as full (gray scale) image, but 3 channels:
    Mat ycbcr(height,width, CV_8UC3);

    Mat in[] = {image, CB, CR};
    int fromto[] = {0,0, 1,1, 2,2}; // YUV
    
    // mash 3 channels from 2 matrix into a single 3 channel matrix:
    mixChannels(in,3, &ycbcr,1, fromto,3);
    
    // convert that 3 channel YUV matrix into 3 channel RGB (displayable)
    cvtColor(ycbcr,image,CV_YCrCb2RGB);
		if (ShowMask) {
      dst = image.clone(); // make a copy, as we want dst to have the same RGB version
    }
#else
    // After calculates, we want to draw 'on' the image, showing our results
    // graphically in some fashion -- that has to happen on a RGB
		if (ShowMask) {
  		cvtColor(image,dst,CV_GRAY2RGB); // create CV_8UC3 version of same image
  		                                 // which will allow us to draw some color on top of the gray
  	}
#endif    

		int Found = 0;
  	if (!ShowMask) {
  		// Show the original image with OpenCV on the screen (could be Grey or RGB)
  		if(ShowVideo) {
  		  imshow("Vision", image);
  		}
  	  
  	  if(fileStream == "Raw") {
  	    imwrite("/tmp/stream/pic.jpg", image);
  	  }
  	  
  	}

		//Threshold the image into a new matrix with a minimum value of 1 and maximum of 255
		Mat thresh;
		//inRange(image, Scalar(1), Scalar(255), thresh);
		
		//Takes the source image and takes a thresh image 
		//applying a min search value and a max search value with a threshold type
		threshold(image, thresh, defMin, defMax, type);
		
		// Show the thresholded image with OpenCV on the screen
		
		if(ShowVideo) {
		  imshow("Threshold", thresh);
		}
		
		if(fileStream == "Threshold") {
		  imwrite("/tmp/stream/pic.jpg", thresh);
		}
		
		// Find all the contours in the thresholded image
		// The original thresholded image will be destroyed while finding contours
		vector <vector<Point> > contours;
		// CV_RETR_EXTERNAL retrieves only the extreme outer contours. 
		//                  It sets hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours.
		// CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal segments and leaves only their end points. 
		//                        For example, an up-right rectangular contour is encoded with 4 points.
		findContours(thresh, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

		// Tolerance percentages are loop-invariant: fetch them once per frame
		// rather than issuing two network-table reads for every contour.
		// defNarr is the wide one; defTall is the tall one.
		const int narrErr = table->GetNumber("Horizontal Percent Error");
		const int tallErr = table->GetNumber("Vertical Percent Error");

		// Output information
		for (size_t ci = 0; ci < contours.size(); ci++) {

		  // examine each contours[ci] for width / height, and if within
		  // tolerance of SeekRatio keep it
		  int MinX, MinY, MaxX, MaxY;
		  MinX = MaxX = contours[ci][0].x;
		  MinY = MaxY = contours[ci][0].y;
#ifdef DEBUG		  
		  cout << "[" << ci << "].size()=" << contours[ci].size() << ":";
#endif		 
		  for (size_t q = 1; q < contours[ci].size(); q++) {
#ifdef DEBUG		  
		    cout << contours[ci][q] << ",";
#endif
		    MaxX = max(MaxX,contours[ci][q].x);
		    MaxY = max(MaxY,contours[ci][q].y);
		    MinX = min(MinX,contours[ci][q].x);
		    MinY = min(MinY,contours[ci][q].y);
		  } // now the extents of the contour (rectangle?) are [MinX,MinY]==>[MaxX,MaxY]
		  int Width = MaxX - MinX;
		  int Height = MaxY - MinY;	  
		  
		  if (Height > 10) { // at least 10 pixels, otherwise it's probably noise
  		  float ThisRatio = float(Width) / float(Height);
  		  
  		  cout << "W/H=(" << Width << " " << Height << " " << ThisRatio << ") ";
  		  if ((ThisRatio >= SeekRatio[0] * (1.0 - (narrErr / 100.0)) &&
  		      ThisRatio <= SeekRatio[0] * (1.0 + (narrErr  / 100.0))) || 
  		      (ThisRatio >= SeekRatio[1] * (1.0 - (tallErr  / 100.0)) &&
  		      ThisRatio <= SeekRatio[1] * (1.0 + (tallErr / 100.0)))) {
  		    // close enough to say "this one could count"
  		    Found++;
  		    cout << "F";
  		    if (ShowMask) {
    		    // draw this contour on a copy of the image
            Scalar color( 0, 0, 255 );
            drawContours( dst, contours, ci, color, CV_FILLED );
          }
   		  }
		  }
		}
		
		// Output values that the Driver Station can analyze:
		// exactly two matching contours means the target is "hot"
		const bool hotness = (Found == 2);
    table->PutBoolean("Hotness", hotness);
    
		if (ShowVideo && ShowMask) {
  		imshow("Vision", dst);
    }
    
    if(ShowMask) {
		  imwrite("/tmp/stream/pic.jpg", dst);
		}
    
		long now = getmsofday();
		// 10 frames * 1000 ms / elapsed ms over the last 10 frames
		cout << "NumRects:" << Found << " "<< (10000 / (now-start[i%10])) << " FPS      \n";
		cout.flush();
		start[i%10] = now;
	}
	cout << "\n"; // save the last line of text
	v.destroy();
	return 0;
}