Code Example #1
File: Common.cpp Project: einbrain/ProphetCursor
bool findTop(cv::Point& top, int& topVal, cv::Mat* src, cv::Rect rect){
	cv::Point res(0, 0);
	int x = 0, y = 0;
	bool bFound = false;

	topVal = 65535;

	if (!src->empty()){
		try
		{
			// scan the ROI for the smallest non-zero value (13-bit depth scaled to 0..255)
			for (int i = rect.tl().y; i < rect.br().y; ++i){
				for (int j = rect.tl().x; j < rect.br().x; ++j){
					Int16 curVarVec = Convert::ToInt16((src->at<Int16>(cv::Point(j, i))) * 255.0f / 8191.0f);
					if (curVarVec < topVal && curVarVec > 0)
					{
						topVal = curVarVec;
						x = j;
						y = i;
						bFound = true;
					}
				}
			}
		}
		catch(...)
		{
			// DO NOTHING
		}
	}
	//ht.nNose = src->at<Int16>(0,0);
	if(bFound) top = cv::Point(x,y);

	return bFound;
}
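A usage sketch for the function above, assuming a 16-bit single-channel depth frame (the 8191 divisor suggests 13-bit depth values); the matrix and ROI here are made up for illustration:

#include <opencv2/core.hpp>

// Hypothetical caller: find the nearest (smallest non-zero, scaled) value
// inside a central ROI of a CV_16SC1 depth frame.
void exampleFindTop(cv::Mat& depthFrame /* CV_16SC1 */)
{
    cv::Point top;
    int topVal = 0;
    if (findTop(top, topVal, &depthFrame, cv::Rect(100, 100, 200, 200))) {
        // top and topVal now describe the closest pixel found in the ROI
    }
}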
Code Example #2
File: main.cpp Project: Ensiss/pima-embarque
//returns true if bounding box a (strictly) contains bounding box b
bool contient(cv::Rect a, cv::Rect b)
{
	if ((a.tl().x < b.tl().x) &&
		(a.tl().y < b.tl().y) &&
		(a.br().x > b.br().x) &&
		(a.br().y > b.br().y))
		return true;
	return false;
}
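For comparison, a non-strict version of the same check can be written with cv::Rect's intersection and equality operators; a minimal sketch (the helper name containsRect is made up here):

#include <opencv2/core.hpp>

// Non-strict containment: true when b lies inside a, shared edges allowed.
// The contient function above uses strict inequalities, so it rejects shared edges.
bool containsRect(const cv::Rect& a, const cv::Rect& b)
{
    return (a & b) == b; // the intersection equals b exactly when a covers b
}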
Code Example #3
File: ofApp.cpp Project: crx091081gb/still-breathing
void BlobPeople::update(const cv::Rect& track) {
    cur = toOf(track).getCenter();
    smooth.interpolate(cur, 0.5);
    height = track.tl().y-track.br().y; // note: tl().y < br().y, so this stores the negated box height
    bottom = track.br().y;
    left = track.tl().x;
    right = track.br().x;

    
}
Code Example #4
File: main.cpp Project: hgaiser/MasterThesis
float jaccardSimilarity(cv::Rect bbox1, cv::Rect bbox2) {
	cv::Vec4i bi(std::max(bbox1.x, bbox2.x), std::max(bbox1.y,bbox2.y), std::min(bbox1.br().x,bbox2.br().x), std::min(bbox1.br().y,bbox2.br().y));
	int iw = bi[2] - bi[0];
	int ih = bi[3] - bi[1];
	if (iw > 0 && ih > 0) {
		int un = (bbox1.br().x - bbox1.x) * (bbox1.br().y - bbox1.y) +
		(bbox2.br().x - bbox2.x) * (bbox2.br().y - bbox2.y) - iw * ih;
		return iw * ih / float(un);
	}

	return 0.f;
}
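The same intersection-over-union can be written more compactly with cv::Rect's & operator and area(); a minimal equivalent sketch (the name jaccardSimilarityCompact is hypothetical):

#include <opencv2/core.hpp>

float jaccardSimilarityCompact(const cv::Rect& a, const cv::Rect& b)
{
	const int inter = (a & b).area();              // overlap area, 0 if disjoint
	const int uni   = a.area() + b.area() - inter; // union area
	return uni > 0 ? inter / float(uni) : 0.f;
}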
Code Example #5
void LevelTracker::setRegion(const cv::Rect &roi)
{
    topLeft=roi.tl();
    bottonRight=roi.br();
    if(topLeft.x<bottonRight.x && topLeft.y<bottonRight.y)
        regOk=true;
}
Code Example #6
File: faceinvaders.cpp Project: m4dm4x/FaceInvaders
static bool checkColision(const cv::Rect &a, const cv::Rect &b){
	if (b.contains(a.tl())) return true;
	if (b.contains(a.br())) return true;
	if (b.contains(cv::Point(a.x+a.width,a.y))) return true;
	if (b.contains(cv::Point(a.x,a.y+a.height))) return true;
	return false;
}
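Note that this corner test misses the case where b lies entirely inside a (none of a's corners is then inside b). A hedged alternative for plain axis-aligned rectangles is to test the intersection area directly (the name rectsOverlap is made up here):

#include <opencv2/core.hpp>

// True when the two rectangles overlap in a region of non-zero area.
static bool rectsOverlap(const cv::Rect& a, const cv::Rect& b)
{
	return (a & b).area() > 0;
}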
Code Example #7
File: geomapeditor.cpp Project: vokintsop/visiworld
bool GeoMapEditor::addMouseObject( // try to add a new object by dragging or clicking with the mouse
    cv::Rect& rect, // note: in-out -- the rectangle is adjusted to the creation rules for this object type
    int flags )
{
  
  if (objType() == "AGM_Segm")
  {
    Point xyTL = rect.tl();
    Point xyBR = rect.br();
    GeoSheet& sh = gm.sheets[ cur_sheet ];
    Point2d enTL = sh.xy2en( xyTL );
    Point2d enBR = sh.xy2en( xyBR );

    AGM_Segm* ps = new AGM_Segm(enTL, enBR);
    gm.objects.push_back(cv::Ptr<AGM_Segm>(ps));
  }
  else
  {
    Point xy = center( rect );
    GeoSheet& sh = gm.sheets[ cur_sheet ];
    Point2d en = sh.xy2en( xy );
    AGM_Point* pp = new AGM_Point( en );
    gm.objects.push_back(cv::Ptr<AGM_Point>(pp));
  }

  return true;
};
Code Example #8
inline static cv::Point center(const cv::Rect &rc)
{
	std::vector<cv::Point> pts;
	pts.push_back(rc.tl());
	pts.push_back(rc.br());

	return center(pts);
}
Code Example #9
File: main.cpp Project: huyunf/opencv
// A little subroutine to draw a box onto an image
//
void draw_box(cv::Mat& img, cv::Rect box) {
	cv::rectangle(
		img,
		box.tl(),
		box.br(),
		cv::Scalar(0x00, 0x00, 0xff) /* red */
		);
}
Code Example #10
File: rectangle.cpp Project: apennisi/annotationtool
void Rectangle::set(const cv::Rect &_rect)
{
    rect = _rect;
    tl = _rect.tl();
    br = _rect.br();
    left->set(tl, cv::Point(tl.x, br.y));
    top->set(tl, cv::Point(br.x, tl.y));
    bottom->set(cv::Point(tl.x, br.y), br);
    right->set(cv::Point(br.x, tl.y), br);
    info.set(_rect);
}
Code Example #11
File: helpers.hpp Project: Amos-zq/4dface
/**
 * @brief Scales and translates a facebox. Useful for converting
 * between face boxes from different face detectors.
 *
 * To convert from V&J faceboxes to ibug faceboxes, use a scaling
 * of 0.85 and a translation_y of 0.2.
 * Ideally, we would learn the exact parameters from data.
 *
 * @param[in] facebox Input facebox.
 * @param[in] scaling The input facebox will be scaled by this factor.
 * @param[in] translation_y How much, in percent of the original facebox's width, the facebox will be translated in y direction. A positive value means facebox moves downwards.
 * @return The rescaled facebox.
 */
cv::Rect rescale_facebox(cv::Rect facebox, float scaling, float translation_y)
{
	// Assumes a square input facebox to work? (width==height)
	const auto new_width = facebox.width * scaling;
	const auto smaller_in_px = facebox.width - new_width;
	const auto new_tl = facebox.tl() + cv::Point2i(smaller_in_px / 2.0f, smaller_in_px / 2.0f);
	const auto new_br = facebox.br() - cv::Point2i(smaller_in_px / 2.0f, smaller_in_px / 2.0f);
	cv::Rect rescaled_facebox(new_tl, new_br);
	rescaled_facebox.y += facebox.width * translation_y;
	return rescaled_facebox;
};
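A usage sketch following the parameters recommended in the doc comment above (converting a Viola-Jones style detection to an ibug-style facebox); the wrapper name to_ibug_facebox is made up here:

#include <opencv2/core.hpp>

cv::Rect to_ibug_facebox(const cv::Rect& vj_facebox)
{
	// Scale by 0.85 and shift down by 20% of the original width, the values
	// the docstring suggests for converting V&J detections to ibug faceboxes.
	return rescale_facebox(vj_facebox, 0.85f, 0.2f);
}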
Code Example #12
/** expand a roi **/
void Cropper::expandROI(cv::Rect &r) {
    bool canExpand = true;
    Point2i p1, p2;
    cv::Rect rp;
    
    while(canExpand){
        canExpand = false;
        Point2i stp = (rect.tl() - r.tl());
        stp.x /=2; stp.y /=2;
        p2 = r.br();
        while(abs(stp.x) + abs(stp.y)>=1){
            p1 = r.tl() + stp;
            rp = cv::Rect(p1, p2);
            if(!rectHasBlankPixels(rp)){
                r = rp;
                canExpand = true;
                break;
            }
            stp.x /= 2; stp.y /=2;
        }
        
        stp = (rect.br() - r.br());
        stp.x /=2; stp.y /=2;
        p1 = r.tl();
        while(abs(stp.x) + abs(stp.y)>=1){
            p2 = r.br() + stp;
            rp = cv::Rect(p1, p2);
            if(!rectHasBlankPixels(rp)){
                r = rp;
                canExpand = true;
                break;
            }
            stp.x /=2; stp.y/=2;
        }
    }
}
Code Example #13
std::pair<double, cv::Point> DescribeMaximum(const cv::Mat &surface,
                                             const cv::Rect &boundingBox) {
  // assert surface double
  cv::Point position = boundingBox.tl();
  cv::Point end = boundingBox.br();
  double value = surface.at<double>(position);
  // br() is exclusive, and the inner index must restart for every column
  for (int x = boundingBox.x; x < end.x; x++) {
    for (int y = boundingBox.y; y < end.y; y++) {
      if (surface.at<double>(y, x) > value) {
        value = surface.at<double>(y, x);
        position = cv::Point(x, y);
      }
    }
  }
  return std::make_pair(value, position);
}
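When only the maximum and its location are needed, cv::minMaxLoc on the ROI gives the same result; a minimal sketch (the name DescribeMaximumRoi is hypothetical; the returned location is relative to the ROI, so it is shifted back by tl()):

#include <utility>
#include <opencv2/core.hpp>

std::pair<double, cv::Point> DescribeMaximumRoi(const cv::Mat& surface,
                                                const cv::Rect& boundingBox) {
  double maxVal = 0.0;
  cv::Point maxLoc;
  cv::minMaxLoc(surface(boundingBox), nullptr, &maxVal, nullptr, &maxLoc);
  return std::make_pair(maxVal, maxLoc + boundingBox.tl()); // back to full-image coordinates
}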
Code Example #14
// === FUNCTION ======================================================================
// Name: get_masked_frame
// Description: Masks the frame based on slope and roi. Mask returned by pointer.
// =====================================================================================
cv::Mat get_masked_frame ( cv::Rect roi, double slope, cv::Mat* frame, cv::Mat* mask )
{
    cv::Point corners[1][4];
    //Set the frame
    *mask=cv::Mat::zeros( frame->size(), CV_8UC1 );
    cv::Mat masked_frame;
    if( slope==0 )
    {
        // TODO: Could use direction handling here.
        corners[0][0] = roi.br();
        corners[0][1] = cv::Point( frame->cols, roi.y+roi.height );
        corners[0][2] = corners[0][1]-cv::Point( 0, roi.height );
        corners[0][3] = corners[0][0]-cv::Point( 0, roi.height );
    }
    else if( isinf( slope ) )
    {
        {
            // note: these two assignments are immediately overwritten by the block below
            corners[0][0] = cv::Point( roi.x, frame->rows );
            corners[0][1] = cv::Point( roi.x, roi.y+roi.height);
        }
        {
            corners[0][0] = roi.tl();
            corners[0][1] = cv::Point( roi.x, 0 );
        }
        corners[0][2] = corners[0][1]+cv::Point( roi.width, 0);
        corners[0][3] = corners[0][0]+cv::Point( roi.width, 0 );
    }
    else
    {
        corners[0][0].x = ( int ) ( (frame->rows + slope*roi.x-roi.y)/slope );
        corners[0][0].y = frame->rows;
        corners[0][1] = cv::Point( ( int )( (-roi.y + slope * roi.x ) / slope ), 0 );
        corners[0][2] = corners[0][1] + cv::Point(roi.width, 0);
        corners[0][3] = corners[0][0] + cv::Point(roi.width, 0);
    }

    // This is weird, but follows OpenCV docs.
    const cv::Point* ppt[1] = { corners[0] };
    const int npt[] = { 4 };

    fillPoly( *mask, ppt, npt, 1, 255 );
    frame->copyTo(masked_frame, *mask);
    return masked_frame;
}	// ----- end of function get_masked_frame ----- 
Code Example #15
File: rectangle.cpp Project: apennisi/annotationtool
Rectangle::Rectangle(const cv::Rect &_rect, const int &_id) :
    rect(_rect)
{
    tl = _rect.tl();
    br = _rect.br();
    left = new VerticalLine();
    left->set(tl, cv::Point(tl.x, br.y));
    top = new HorizontalLine();
    top->set(tl, cv::Point(br.x, tl.y));
    bottom = new HorizontalLine();
    bottom->set(cv::Point(tl.x, br.y), br);
    right = new VerticalLine();
    right->set(cv::Point(br.x, tl.y), br);
    info.set(_id, _rect);
    selected = false;
    selected_color = RED_COLOR;
    offset = 4;
    lineOffset = LINE_WEIGHT;
    color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );

}
Code Example #16
File: cvx.cpp Project: LeszekSwirski/singleeyefitter
void singleeyefitter::cvx::getROI(const cv::Mat& src, cv::Mat& dst, const cv::Rect& roi, int borderType)
{
    cv::Rect bbSrc = boundingBox(src);
    cv::Rect validROI = roi & bbSrc;
    if (validROI == roi)
    {
        dst = cv::Mat(src, validROI);
    }
    else
    {
        // Figure out how much to add on for top, left, right and bottom
        cv::Point tl = roi.tl() - bbSrc.tl();
        cv::Point br = roi.br() - bbSrc.br();

        int top = std::max(-tl.y, 0);  // Top and left are negated because adding a border
        int left = std::max(-tl.x, 0); // goes "the wrong way"
        int right = std::max(br.x, 0);
        int bottom = std::max(br.y, 0);

        cv::Mat tmp(src, validROI);
        cv::copyMakeBorder(tmp, dst, top, bottom, left, right, borderType);
    }
}
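A usage sketch for getROI, assuming the boundingBox(src) helper used above returns the full image rectangle; the padded_patch wrapper is made up here. A requested ROI that extends past the border is padded by cv::copyMakeBorder with the chosen border type:

#include <opencv2/core.hpp>

cv::Mat padded_patch(const cv::Mat& src)
{
    cv::Mat patch;
    // The ROI sticks out 20 px past the top-left corner; the missing band is replicated.
    singleeyefitter::cvx::getROI(src, patch, cv::Rect(-20, -20, 100, 100), cv::BORDER_REPLICATE);
    return patch; // always 100x100, regardless of how much of the ROI lay inside src
}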
Code Example #17
File: p3-20.cpp Project: PowerFire/prg_OpenCv
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
//	Custom rectangle drawing function
//
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void DrawRectangle( Mat& img, cv::Rect box)
{
    // random color
    rectangle(img, box.tl(), box.br(), Scalar(g_rng.uniform(0,255),
              g_rng.uniform(0,255), g_rng.uniform(0,255)));
}
Code Example #18
File: runner.cpp Project: TuringKi/legit
int main( int argc, char** argv)
{

// Initialization

  traxmode = false;
  silent = false;
  interactive = false;
  opterr = 0;
  initializeFile = NULL;
  configFile = NULL;
  configString = NULL;
  outputFile = NULL;

#ifdef BUILD_INTROSPECTION
  introspectionFile = NULL;
#endif
  int c = 0;
  seed = time(NULL);

  initialized = false;
  initialize = false;
  run = true;

  while ((c = getopt(argc, argv, CMD_OPTIONS)) != -1)
    switch (c)
      {
      case 'h':
        print_help();
        exit(0);
      case 's':
        silent = true;
        break;
      case 'g':
        enable_gui();
        break;
      case 'i':
        interactive = true;
        break;
#ifdef BUILD_DEBUG
      case 'd':
        __debug_enable();
        break;
#endif
      case 'C':
        configFile = optarg;
        break;
      case 'c':
        configString = optarg;
        break;
      case 'I':
        initializeFile = optarg;
        break;
      case 'o':
        outputFile = optarg;
        break;
#ifdef BUILD_INTROSPECTION
      case 'D':
        introspectionFile = optarg;
        break;
#endif
      case 'S':
        seed = atoi(optarg);
        break;
#ifdef BUILD_TRAX
      case 't':
        traxmode = true;
        break;
#endif
      default:
        print_help();
        fprintf(stderr, "Unknown switch '-%c'\n", optopt);
        exit(-1);
      }

  RANDOM_SEED(seed);

  DEBUGMSG("Random seed: %d \n", seed);

  sequence = NULL;

  if (!traxmode)
    {

      if (optind < argc)
        {

          sequence = open_sequence(argv[optind]);

          if (!sequence)
            {
              fprintf(stderr, "Illegal source\n");
              exit(-1);
            }
          VideoFileSequence* videostream = dynamic_cast<VideoFileSequence*>(sequence);
          ImageDirectorySequence* dirstream = dynamic_cast<ImageDirectorySequence*>(sequence);
          FileListSequence* liststream = dynamic_cast<FileListSequence*>(sequence);
          if (videostream != NULL)
            {

              const char* filename = videostream->get_file();
              initialize = true;

              if (!initializeFile && filename)
                {
                  initializeFile = new char[strlen(filename) + 5];
                  strcpy(initializeFile, filename);
                  strcpy(initializeFile+strlen(filename), ".txt");
                }

            }
          else if (dirstream != NULL)
            {

              const char* filename = dirstream->get_directory();
              initialize = true;

              if (!initializeFile && filename)
                {
                  initializeFile = new char[strlen(filename) + 5];
                  strcpy(initializeFile, filename);
                  strcpy(initializeFile + strlen(filename) +
                         (matches_suffix(filename, FILE_DELIMITER_STR) ? -1 : 0),
                         ".txt");
                }

            }
          else if (liststream != NULL && initializeFile)
            {

              initialize = true;

            }
          else if (dynamic_cast<CameraSequence*>(sequence) != NULL)
            {

              interactive = false;

            }

        }
      else
        {
          fprintf(stderr, "No source given\n");
          exit(-1);
        }
    }

// Tracking

  tracking_window = is_gui_enabled() ? ImageWindow::create(WINDOW_NAME, Size(10, 10), false) : NULL;

  if (configFile)
    {
      if (file_type(configFile) == FILETYPE_FILE)
        {
          DEBUGMSG("Reading config from '%s' \n", configFile);
          config.load_config(string(configFile));
        }
      else
        {
          DEBUGMSG("Reading built-in configuration '%s' \n", configFile);
          read_builtin_config(configFile, config);
        }
    }

  if (configString)
    {
      for (int i = 0; i < strlen(configString); i++)
        {
          if (configString[i] == ';') configString[i] = '\n';
        }
      istringstream moreConfig(configString);
      moreConfig >> config;
    }

  DEBUGGING
  {

    vector<string> configKeys = config.keys();
    for (int i = 0; i < configKeys.size(); i++)
      {
        string value = config.read<string>(configKeys[i]);
        DEBUGMSG("Config dump: %s = %s\n", configKeys[i].c_str(), value.c_str());
      }

  }

  initialize_canvases(config);

  if (config.keyExists("window.main.output"))
    {
      tracking_window->set_output(config.read<string>("window.main.output"), config.read<int>("window.main.output.slowdown", 1));

    }

#ifdef BUILD_TRAX
  trax_properties* trax_out_properties = NULL;
  if (traxmode)
    {
#ifdef TRAX_DEBUG
      trax_setup("trax.log", TRAX_LOG_INPUT |  TRAX_LOG_OUTPUT | TRAX_LOG_DEBUG);
#else
      trax_setup(NULL, 0);
#endif
      trax_out_properties = trax_properties_create();
    }
#endif

  Ptr<Tracker> tracker;
  Image frame;

  int frameNumber = 1;

  ImageWindow::grid(2, 3, 400, 400);


  std::ofstream rectangle_output;
  if (outputFile)
    rectangle_output.open( outputFile , std::ifstream::out);

  for(; run;)
    {

      long start_time = clock();

      char key = -1;

      /////////////////////         GET IMAGE             ////////////////////

      if (!traxmode)
        {
          frame.capture(sequence);
        }
#ifdef BUILD_TRAX
      else
        {

          trax_imagepath path;
          trax_rectangle rect;
          trax_properties* prop = trax_properties_create();

          int tr = trax_wait(path, prop, &rect);

          if (tr == TRAX_INIT)
            {

              start.x = rect.x;
              start.y = rect.y;
              start.width = rect.width;
              start.height = rect.height;

              trax_properties_enumerate(prop, trax_properties_to_config, &config);

              initialize = true;

              initialized = false;

            }
          else if (tr == TRAX_FRAME)
            {

              if (!initialized)
                {
                  trax_properties_release(&prop);
                  break;
                }
            }
          else
            {
              trax_properties_release(&prop);
              break;
            }

          trax_properties_release(&prop);

          frame.load(string(path));

          trax_properties_clear(trax_out_properties);
        }
#endif

      if (frame.empty())
        {
          DEBUGMSG("No new frame available, quitting.\n");
          break;
        }

      if (frameNumber == 1 && !traxmode)
        {
          if (is_gui_enabled()) tracking_window->resize(Size(frame.width(), frame.height()));
          int size = MAX(frame.width() / 8,  frame.height() / 8);
          start.width = size;
          start.height = size;
          start.x = (frame.width() - start.width) / 2;
          start.y = (frame.height() - start.height) / 2;
        }

      /////////////////////         PROCESSING            ////////////////////

      if (!initialized && initialize)
        {
          DEBUGMSG("Initializing using rectangle from '%s'\n", initializeFile);
          if (initializeFile)
            {
              DEBUGMSG("Initializing using rectangle from '%s'\n", initializeFile);
              start = read_rectangle(initializeFile);
              if (is_gui_enabled() && (start.width < 1 || start.height < 1))
                {
                  Mat rgb = frame.get_rgb();
                  tracking_window->draw(rgb);
                  tracking_window->push();
                  if (tracking_window->queryrect(start))
                    {
                      write_rectangle(initializeFile, start);
                    }
                }
            }

          if (tracker.empty())
            {

              if (!config.keyExists("tracker"))
                throw LegitException("Unknown tracker type");

              tracker = create_tracker(config.read<string>("tracker"), config, "default");

              if (!silent)
                {
                  performance_observer = new PerformanceObserver(tracker->get_stages());
                  tracker->add_observer(performance_observer);
                }
#ifdef BUILD_TRAX
              if (traxmode)
                {
                  trax_observer = new TraxObserver();
                  tracker->add_observer(trax_observer);
                  trax_properties_set_int(trax_out_properties, "seed", seed);
                }
#endif

#ifdef BUILD_INTROSPECTION
              if (introspectionFile)
                {
                  introspection_observer = new IntrospectionObserver(frame.width(), frame.height(), seed);
                  ((IntrospectionObserver *)&(*introspection_observer))->configuration(config);
                  tracker->add_observer(introspection_observer);
                }
#endif
            }

          tracker->initialize(frame, start);
          initialized = true;

        }
      else if (!tracker.empty())
        {
          frameNumber++;
          if (!traxmode && sequence->is_finite() && sequence->position() >= sequence->size())
            {
              DEBUGMSG("End frame reached, quitting.\n");
              run = false;
            }

          long timer = clock();
          tracker->update(frame);

          if (!silent && !traxmode) printf("Frame %d - elapsed time: %d ms\n", frameNumber, (int)(((clock() - timer) * 1000) / CLOCKS_PER_SEC));
        }

      /////////////////////       OUTPUT RESULTS          ////////////////////

#ifdef BUILD_TRAX
      if (traxmode)
        {
          trax_rectangle region;

          if (tracker->is_tracking())
            {

              Rect rect = tracker->region();
              region.x = rect.x;
              region.y = rect.y;
              region.width = rect.width;
              region.height = rect.height;

            }
          else
            {

              region.x = 0;
              region.y = 0;
              region.width = 0;
              region.height = 0;

            }

          trax_report_position(region, trax_out_properties);

        }
#endif

      if (is_gui_enabled() && tracking_window)
        {
          Mat rgb = frame.get_rgb();
          tracking_window->draw(rgb);

          if (initialized)
            {

              tracker->visualize(*tracking_window);

              cv::Rect region = tracker->region();

              tracking_window->rectangle(region, COLOR_YELLOW, 2);

            }
          else
            {

              tracking_window->rectangle(start.tl(), start.br(), Scalar(255, 255, 255), 1);
            }

          tracking_window->push();
        }

      if (rectangle_output.is_open())
        {
          cv::Rect rectangle = initialized ? tracker->region() : start;
          rectangle_output << rectangle.x << "," << rectangle.y << "," << rectangle.width << "," << rectangle.height << std::endl;
        }

      /////////////////////        HANDLE INPUT           ////////////////////

      if (!traxmode)
        {
          if (is_gui_enabled())
            {

              if (interactive)
                {

                  switch (pause())
                    {
                    case 27:
                      run = false;
                      break;
                    case 'p':
                      interactive = !interactive;
                      break;
                    }

                }
              else
                {

                  long wait = throttle ? MAX(1, (limitFrameTime - (clock() - start_time))) : 1;
                  key = waitKey(wait);
                  switch (key)
                    {
                    case 27:
                      run = false;
                      break;
                    case ' ':
                      initialize = true;
                      break;
                    }

                }
            }
          else
            {
              if (throttle)
                {
                  long wait = MAX(1, (limitFrameTime - (clock() - start_time)));
                  sleep(wait);
                }
            }

          if (!tracker.empty() && !tracker->is_tracking())
            {
              DEBUGMSG("Target lost\n");
              run = false;
            }

        }
#ifdef BUILD_TRAX
      else
        {
          if (is_gui_enabled()) waitKey(1);
        }
#endif

    }

  if (!performance_observer.empty())
    {
      ((PerformanceObserver *)&(*performance_observer))->print();
    }

// Cleanup

#ifdef BUILD_INTROSPECTION
  if (!introspection_observer.empty())
    {
      DEBUGMSG("Storing introspection data to %s\n", introspectionFile);
      ((IntrospectionObserver *)&(*introspection_observer))->store(introspectionFile, true);
    }
#endif

#ifdef BUILD_TRAX
  trax_properties_release(&trax_out_properties);
  if (traxmode)
    trax_cleanup();
#endif

  if (rectangle_output.is_open())
    {
      rectangle_output.close();
    }

  if (sequence)
    delete sequence;

}
Code Example #19
File: 18_UseMouse.cpp Project: mysticTot/learn_c
//-----------------------------------[ DrawRectangle() function ]------------------------------
//		Description: custom rectangle drawing function
//-----------------------------------------------------------------------------------------------
void DrawRectangle( cv::Mat& img, cv::Rect box )
{
	cv::rectangle(img,box.tl(),box.br(),cv::Scalar(g_rng.uniform(0, 255), g_rng.uniform(0,255), g_rng.uniform(0,255))); // random color
}
Code Example #20
File: objectFinder.cpp Project: tanismar/affordances
    bool respond(const Bottle &      command,
                 Bottle &      reply)
    {
        // Get command string
        string receivedCmd = command.get(0).asString().c_str();
       
        bool f;
        int responseCode;   //Will contain Vocab-encoded response

        reply.clear();  // Clear reply bottle        
        if (receivedCmd == "getPointClick")
        {
            if (! getPointClick())
            {
                //Encode response
                responseCode = Vocab::encode("nack");
                reply.addVocab(responseCode);
            }
            else
            {
                //Encode response                
                responseCode = Vocab::encode("ack");
                reply.addVocab(responseCode);
                Bottle& bCoords = reply.addList();
                bCoords.addInt(coords2D(0));
                bCoords.addInt(coords2D(1));
                //bCoords.addDouble(coords3D(2));
            }
            return true;

        } else if (receivedCmd == "getPointTrack")
        {   
            tableHeight = command.get(1).asDouble();

            if (!getPointTrack(tableHeight))
            {
                //Encode response
                responseCode = Vocab::encode("nack");
                reply.addVocab(responseCode);
            }
            else
            {
                //Encode response              
                printf("Sending out 3D point!!\n");
                responseCode = Vocab::encode("ack");
                reply.addVocab(responseCode);
                Bottle& bCoords = reply.addList();
                bCoords.addDouble(coords3D(0));
                bCoords.addDouble(coords3D(1));
                bCoords.addDouble(coords3D(2));
                printf("Coords Bottle %s \n", bCoords.toString().c_str());
                printf("sent reply %s \n", reply.toString().c_str());
            }
            return true;

        } else if (receivedCmd == "getBox")
        {   
            if (!getBox())
            {
                //Encode response
                responseCode = Vocab::encode("nack");
                reply.addVocab(responseCode);
            }
            else
            {
                //Encode response                
                responseCode = Vocab::encode("ack");
                reply.addVocab(responseCode);
                Bottle& bBox = reply.addList();
                bBox.addInt(BBox.tl().x);
                bBox.addInt(BBox.tl().y);
                bBox.addInt(BBox.br().x);
                bBox.addInt(BBox.br().y);                
            }
        }
        else if (receivedCmd == "help")
        {
            reply.addVocab(Vocab::encode("many"));
            reply.addString("Available commands are:");
            reply.addString("help");
            reply.addString("quit");           
            reply.addString("getPointClick - reads a click and returns the 2D coordinates");
            reply.addString("getPointTrack - Retrieves 2D coords and returns the 3D coordinates of the object tracked based on the table Height");
            reply.addString("getBox - Crops the image based on user input and creates a template for the tracker with it");
            return true;
        }
        else if (receivedCmd == "quit")
        {
            reply.addString("Quitting.");
            return false; 
        } else{        
            reply.addString("Invalid command, type [help] for a list of accepted commands.");
        }
        return true;
    }
Code Example #21
	/*! @brief create a single bounding box in 3D
	 *
	 * Given an image, its depth correspondences and a candidate,
	 * return an approximate 3D bounding box which encapsulates the object
	 * @param im the color image
	 * @param depth the depth image (may be of different resolution to the color image)
	 * @return
	 */
	Rect3d boundingBox3D(const cv::Mat& im, const cv::Mat& depth) const {

		const unsigned int nparts = parts_.size();
		const cv::Rect bounds = cv::Rect(0,0,0,0) + im.size();
		const cv::Rect bb  = this->boundingBox();
		const cv::Rect bbn = this->boundingBoxNorm();

		cv::Size_<double> imsize = im.size();
		cv::Size_<double> dsize  = depth.size();
		cv::Point_<double> s = cv::Point_<double>(dsize.width / imsize.width, dsize.height / imsize.height);

		cv::Mat_<float> points;
		std::vector<cv::Rect> boxes;
		for (unsigned int n = 0; n < nparts; ++n) {
			// only keep the intersection of the part with the image frame
			boxes.push_back(parts_[n] & bounds);
		}
		boxes.push_back(bbn & bounds);

		for (unsigned int n = 0; n < boxes.size(); ++n) {
			// scale the part down to match the depth image size
			cv::Rect& r = boxes[n];
			r.x = r.x * s.x;
			r.y = r.y * s.y;
			r.width  = r.width  * s.x;
			r.height = r.height * s.y;

			// add the valid points
			cv::Mat_<float> part = depth(r);
			if(part.empty())
				continue;

			for (cv::MatIterator_<float> it = part.begin(); it != part.end(); ++it) {
				if (*it != 0 && !std::isnan(*it)) points.push_back(*it);
			}

			if(points.empty())
			{
				return Rect3d(std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN(),
						0, 0, 0);
			}

		}

		// sort the points
		std::sort(points.begin(), points.end());
		cv::resize(points, points, cv::Size(1, 400));

		// get the median of the points
		const unsigned int M = points.rows;
		const unsigned int midx = M/2;
		float median = points[midx][0];

		// filter the points
		cv::Mat_<float> g, dog, dpoints;
		cv::Matx<float, 3, 1> diff(-1, 0, 1);
		g= cv::getGaussianKernel(35, 4, CV_32F);
		cv::filter2D(g, dog, -1, diff);
		cv::filter2D(points, dpoints, -1, dog);

		// starting at the median point, walk up and down until a gradient threshold (0.1)
		unsigned int dminidx = midx, dmaxidx = midx;
		for (unsigned int m = midx; m < M; ++m) {
			if (fabs(dpoints[m][0]) > 0.035) break;
			dmaxidx = m;
		}
		for (int m = midx; m >= 0; --m) {
			if (fabs(dpoints[m][0]) > 0.035) break;
			dminidx = m;
		}

		// construct the 3D bounding box
		cv::Point3_<double> tl(bb.x,      bb.y,      points[dminidx][0]);
		cv::Point3_<double> br(bb.br().x, bb.br().y, points[dmaxidx][0]);

		return Rect3d(tl, br);
	}
Code Example #22
void GroundTruthEvaluation::evaluateLocalizer(const int currentFrameNumber, taglist_t const& taglist)
{
	GroundTruth::LocalizerEvaluationResults& results = _localizerResults;

	// iterate over ground truth data (typed Grid3D), convert them to PipelineGrids
	// and store them in the std::set taggedGridsOnFrame
    for (GroundTruthGridSPtr const& grid : _groundTruthData.at(currentFrameNumber))
	{
        if (grid) {
            results.taggedGridsOnFrame.insert(grid);
        }
	}

	// Detect False Negatives
    for (const GroundTruthGridSPtr& grid : results.taggedGridsOnFrame)
	{
		// ROI of ground truth
		const cv::Rect gridBox = grid->getBoundingBox();

		bool inGroundTruth = false;
		for (const pipeline::Tag& tag : taglist)
		{
			// ROI of pipeline tag
            const cv::Rect& tagBox = tag.getRoi();

			// target property is complete containment
			if (tagBox.contains(gridBox.tl())
			    && tagBox.contains(gridBox.br()))
			{
				inGroundTruth = true;
				break;
			}
		}

		// mark current ground truth box as missing detection (false negative)
		if (!inGroundTruth)
			results.falseNegatives.insert(grid);
	}

	// Detect False Positives
	// ground truth grids
	std::set<std::shared_ptr<PipelineGrid>> notYetDetectedGrids = results.taggedGridsOnFrame;

	// iterate over all pipeline tags
	for (const pipeline::Tag& tag : taglist)
	{
		// ROI of pipeline tag
        const cv::Rect& tagBox = tag.getRoi();

		bool inGroundTruth = false;

		// iterate over all ground truth grids
		for (const std::shared_ptr<PipelineGrid>& grid : notYetDetectedGrids)
		{
			// ROI of ground truth grid
			const cv::Rect gridBox = grid->getBoundingBox();

			if (tagBox.contains(gridBox.tl())
			    && tagBox.contains(gridBox.br()))
			{
				// match!
				inGroundTruth = true;

				// store pipeline tag in extra set
				results.truePositives.insert(tag);

				// store mapping from roi -> grid
				results.gridByTag[tag] = grid;

				// this modifies the container in a range-based for loop, which may invalidate
				// the iterators. This is not a problem in this specific case because we exit the
				// loop right away. (beware if that is going to be changed)
				notYetDetectedGrids.erase(grid);
				break;
			}
		}

		// if no match was found, the pipeline-reported roi is a false detection
		if (!inGroundTruth)
			results.falsePositives.insert(tag);
	}
}