// Beispiel #1  (example marker retained from original dump)
// 0
void Tracker::update(){

	//TODO: too many responsabilities, the association method could become a strategy pattern

	std::vector<Rect> rois = detector_->getDetectionRois();
	trackedObjs_ = policy_->execute(rois, trackedObjs_);
	

	//update trackable status
	for (std::vector<Trackable*>::iterator it = trackedObjs_.begin(); it != trackedObjs_.end(); ++it)
		(*it)->updateStatus();

	removeOldTracks();
	drawResults();

	//}
}
/* Mouse callback for interactive contour editing.
 *
 * Operates entirely on file-level globals: `contour1` (the contour being
 * edited), two live-wire helpers `lw1`/`lw2`, the cursor point `pCenter`,
 * scratch points/indices (pNGBH/idxNGBH, pC1/idxC1, pC2/idxC2), the preview
 * paths `path1`/`path2`, and the `isDrawCircle` display flag.
 *
 * Gestures:
 *   - right button:      reset the contour and regenerate it;
 *   - left button down:  anchor two live-wire seeds on the contour near the
 *                        cursor (one in each direction);
 *   - left drag:         recompute both live-wire paths to the cursor;
 *   - left button up:    splice path1 (reversed) + the untouched contour arc
 *                        [idxC1..idxC2] + path2 into a new `contour1`.
 */
static void onMouse( int event, int x, int y, int flags, void* )
{
    pCenter = cv::Point(x,y);
    // NOTE(review): the shift/ctrl modifier flags were read into locals but
    // never used anywhere in this handler — dead code removed.
    bool isKeyMLB    = (flags&CV_EVENT_FLAG_LBUTTON);
    bool isKeyMRB    = (flags&CV_EVENT_FLAG_RBUTTON);

    if(isKeyMRB) {
        // Right button: discard the current contour and start over.
        contour1.clear();
//        lw1.clean();
//        lw2.clean();
        generateContour();
    }

    if(event==CV_EVENT_LBUTTONDOWN) {
//        pCenter = cv::Point(x,y);
        // Anchor the edit: find the contour point nearest the cursor, then a
        // seed point in each direction along the contour (forward/backward).
        findNGBHPoint(contour1, pCenter, pNGBH, idxNGBH);
        findPtsInContour(contour1, pCenter, idxC1, pC1, true);
        findPtsInContour(contour1, pCenter, idxC2, pC2, false);
        //
        lw1.calcLWP(pC1,true);
        lw2.calcLWP(pC2,true);
//        drawResults();
        isDrawCircle = true;
    }
    if(event==CV_EVENT_LBUTTONUP) {
        std::cout << "\n\n" << std::endl;
        std::cout << pCenter << " : " << lw1.getCPoint() << "/" << lw2.getCPoint() << std::endl;
        printCnt(lw1.path);
        std::cout << pC1 << " / " << pC2 << std::endl;
//        std::cout << "STOP" << std::endl;
        // Rebuild the contour from three pieces:
        //   1) lw1's path, reversed (so it runs toward the contour);
        //   2) the original contour arc from idxC1 to idxC2 (with wrap-around);
        //   3) lw2's path in forward order.
        std::vector<cv::Point> newcnt;
        int numC1 = lw1.path.size();
        int numC2 = lw2.path.size();
        int sizC0 = contour1.size();
        for(int ii=numC1; ii-->0;) {
            newcnt.push_back(lw1.path.at(ii));
        }
        // NOTE(review): assumes idxC1/idxC2 are valid indices into contour1
        // (set on LBUTTONDOWN); if idxC2 were out of range this loop would
        // spin forever — confirm findPtsInContour guarantees validity.
        for(int ii=idxC1; ; ii++) {
            if(ii>=sizC0) {
                ii=0;   // wrap around the closed contour
            }
            newcnt.push_back(contour1.at(ii));
            if(ii==idxC2) {
                break;
            }
        }
        for(int ii=0; ii<numC2; ii++) {
            newcnt.push_back(lw2.path.at(ii));
        }
        contour1 = newcnt;
        drawResults();
    }
    if(isKeyMLB) {
//        std::cout << "(" << x << ", " << y << ")" << std::endl;
        // Dragging with the left button: recompute both live-wire previews
        // ending at the cursor; each preview = current seed point + path.
        if(lw1.isStartedPath()) {
            lw1.calcLWPath(pCenter);
            path1.clear();
            path1.push_back(lw1.getCPoint());
            path1.insert(path1.end(), lw1.path.begin(), lw1.path.end());
        }
        if(lw2.isStartedPath()) {
            lw2.calcLWPath(pCenter);
            path2.clear();
            path2.push_back(lw2.getCPoint());
            path2.insert(path2.end(), lw2.path.begin(), lw2.path.end());
        }
        isDrawCircle = true;
    } else {
        isDrawCircle = false;
    }
    //
    drawResults();
}
/* ROS image callback: runs the Caffe lane-detection net on one frame.
 *
 * Pipeline: convert the ROS image to BGR8, halve 1280-wide frames with
 * pyrDown, repack the pixels into a planar (CHW) float blob, subtract the
 * mean image, run a forward pass, overlay the results on the frame, publish
 * timing info on `delay_pub`, and show the frame in OPENCV_WINDOW.
 *
 * Uses file-level globals: `mean` (mean blob), `caffe_net`, `delay_pub`,
 * OPENCV_WINDOW.
 */
void caffeCallback(const sensor_msgs::ImageConstPtr& msg){
  lane_detection::Delay delay_msg;
  delay_msg.cam_frame = msg->header.stamp;
  delay_msg.start_proc = ros::Time::now();

  cv_bridge::CvImageConstPtr cv_ptr;
  try {
    cv_ptr = cv_bridge::toCvShare ( msg, sensor_msgs::image_encodings::BGR8 );
  } catch ( cv_bridge::Exception& e ) {
    ROS_ERROR ( "cv_bridge exception: %s", e.what() );
    return;
  }

  cv::Mat frame = cv_ptr->image;
  if ( frame.cols==1280 ) {
    cv::pyrDown ( frame, frame, cv::Size ( frame.cols/2, frame.rows/2 ) );
  }

  // The network input is a fixed 1x3x480x640 blob. Guard against a frame of
  // any other size: the copy/mean loops below would otherwise read and write
  // out of bounds.
  caffe::Blob<float> input(1,3,480,640);
  int frameSize = frame.cols*frame.rows;
  if ( frameSize != input.height()*input.width() ) {
    ROS_ERROR ( "caffeCallback: unexpected frame size %dx%d (expected %dx%d)",
                frame.cols, frame.rows, input.width(), input.height() );
    return;
  }

  std::vector<cv::Mat> splitChannels;
  cv::split(frame,splitChannels);

  float *input_data = input.mutable_cpu_data();
  for(int i = 0;i<3;i++){
    // Element-wise uchar -> float conversion into planar layout.
    // NOTE(review): assumes each channel Mat is continuous in memory — true
    // for cv::split output, but worth confirming if the pipeline changes.
    // (Dropped a redundant C-style cast: input_data is already float*.)
    std::copy(splitChannels[i].data, splitChannels[i].data+frameSize, input_data+frameSize*i);
  }
  // Subtract the per-pixel mean image over the whole blob.
  float *mean_data = mean.mutable_cpu_data();
  for(int i = 0;i<input.count();i++){
    input_data[i] -= mean_data[i];
  }
  std::vector<caffe::Blob<float>* > bottom_vec;
  bottom_vec.push_back(&input);
  const std::vector<caffe::Blob<float>*>& result =caffe_net->Forward(bottom_vec);

  // result[0]: bounding-box regression blob; result[1]: per-pixel label blob.
  caffe::Blob<float>* pix_blob = result[1];
  caffe::Blob<float>* bb_blob = result[0];

  bool predict_depth = true;
  int quad_height = pix_blob->height();
  int quad_width = pix_blob->width();
  int grid_dim=4;
  int grid_length = grid_dim*grid_dim;
  int label_height = quad_height*grid_dim;
  int label_width = quad_width*grid_dim;
  int num_regression= predict_depth ? 6:4;    // 6 values per box when depth is predicted
  double scaling = 8.0; // ratio of image size to pix label size
  const float* pix_label = pix_blob->cpu_data();
  const float* reg_label = bb_blob->cpu_data();
  drawResults(frame, pix_label, reg_label,
                      predict_depth, scaling, num_regression,
                      quad_height, quad_width, grid_dim);

  // Publish processing-time / end-to-end-delay diagnostics.
  delay_msg.end_proc = ros::Time::now();
  delay_msg.proc_time = delay_msg.end_proc-delay_msg.start_proc;
  delay_msg.cam_frame_delay = delay_msg.end_proc-delay_msg.cam_frame;
  delay_pub.publish ( delay_msg );

  // Update GUI Window
  cv::imshow(OPENCV_WINDOW, frame);
  cv::waitKey ( 3 );
}
/* Run one CAMSHIFT tracking iteration over all cameras.
 *
 * For each camera: back-project its image through that camera's histogram,
 * run cvCamShift inside the current search window, and either shrink the
 * window to the found component or (on loss of track) expand it to the whole
 * image. Optionally samples a new histogram from `sample_square` and, when
 * `show_ui` is set, tiles originals (top row) and back-projections (bottom
 * row) into one display.
 *
 * Returns the per-camera result boxes (member array `results`).
 */
CvBox2D *MultiCamshiftUI::track(IplImage **images, bool show_ui, bool show_backprojections) {
  for (int camera=0; camera<n_cameras; camera++) {
    /* Calculate the backprojection, in the original (YUV) colorspace */
    cvCvtPixToPlane(images[camera], planes[0], planes[1], planes[2], 0);
    cvCalcBackProject(planes, backprojection, histograms[camera]);

    if (show_ui && show_backprojections) {
      /* Make a YUV version of the output, for display */
      gray_to_yuv(backprojection, yuv_backprojections[camera]);
    }

    if (search_windows[camera].width > 0 && search_windows[camera].height > 0) {
      /* Use the CAMSHIFT algorithm to search for the object of interest */
      CvConnectedComp comp;
      cvCamShift(backprojection, search_windows[camera],
                 cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 1),
                 &comp, &results[camera]);
      search_windows[camera] = comp.rect;

      if (results[camera].size.width > 0 &&
          results[camera].size.height > 0) {
        /* We found an interesting object, draw it if applicable */
        if (show_ui)
          drawResults(camera, images[camera]);
      }
      else {
        /* We lost tracking, expand the search window to the full image */
        search_windows[camera].x = 0;
        search_windows[camera].y = 0;
        search_windows[camera].width = image_size.width;
        search_windows[camera].height = image_size.height;
      }
    }
  }

  if (sample_from_sample_square) {
    /* Restrict all planes to the sampling rectangle, then (re)build the
     * histogram for that camera from just that region. */
    cvSetImageROI(images[sample_square_camera], sample_square);
    cvSetImageROI(planes[0], sample_square);
    cvSetImageROI(planes[1], sample_square);
    cvSetImageROI(planes[2], sample_square);

    cvCvtPixToPlane(images[sample_square_camera], planes[0], planes[1], planes[2], 0);
    cvCalcHist(planes, histograms[sample_square_camera], 1);

    cvResetImageROI(images[sample_square_camera]);
    cvResetImageROI(planes[0]);
    cvResetImageROI(planes[1]);
    cvResetImageROI(planes[2]);

    /* Also set the windowIn to the sampling rectangle, to point CAMSHIFT at
     * what we're interested in.
     */
    search_windows[sample_square_camera] = sample_square;
  }

  if (show_ui) {
    /* Tile cameras horizontally, with original image on
     * top and backprojection on bottom.
     * (Replaced a non-standard variable-length array with std::vector.)
     */
    std::vector<IplImage*> view_grid(n_cameras * 2);
    int num_views = 0;
    for (int i=0; i<n_cameras; i++)
      view_grid[num_views++] = images[i];
    if (show_backprojections) {
      for (int i=0; i<n_cameras; i++)
        view_grid[num_views++] = yuv_backprojections[i];
    }

    if (draw_sample_square) {
      /* Bug fix: the bottom-right corner's y offset previously used
       * sample_square.width, drawing the wrong outline for any non-square
       * sampling rectangle. */
      cvRectangle(images[sample_square_camera],
                  cvPoint(sample_square.x - 1, sample_square.y - 1),
                  cvPoint(sample_square.x + sample_square.width + 1,
                          sample_square.y + sample_square.height + 1),
                  CV_RGB(128,128,255), 1);
    }

    cv_sdl_show_yuv_tiles(&view_grid[0], num_views, n_cameras);
  }

  return results;
}