Code example #1
File: geomapeditor.cpp  Project: vokintsop/visiworld
bool GeoMapEditor::addMouseObject( // try to add a new object drawn by dragging or clicking the mouse
    cv::Rect& rect, // note: in-out -- the rectangle is adjusted according to the initial-creation rules for the given object type
    int flags )
{
  
  if (objType() == "AGM_Segm")
  {
    Point xyTL = rect.tl();
    Point xyBR = rect.br();
    GeoSheet& sh = gm.sheets[ cur_sheet ];
    Point2d enTL = sh.xy2en( xyTL );
    Point2d enBR = sh.xy2en( xyBR );

    AGM_Segm* ps = new AGM_Segm(enTL, enBR);
    gm.objects.push_back(cv::Ptr<AGM_Segm>(ps));
  }
  else
  {
    Point xy = center( rect );
    GeoSheet& sh = gm.sheets[ cur_sheet ];
    Point2d en = sh.xy2en( xy );
    AGM_Point* pp = new AGM_Point( en );
    gm.objects.push_back(cv::Ptr<AGM_Point>(pp));
  }

  return true;
}
Code example #2
File: faceinvaders.cpp  Project: m4dm4x/FaceInvaders
// Returns true if any corner of rectangle a lies inside rectangle b.
// Note: this is not a full intersection test (it misses the case where b lies entirely inside a).
static bool checkColision(const cv::Rect &a, const cv::Rect &b){
	if (b.contains(a.tl())) return true;
	if (b.contains(a.br())) return true;
	if (b.contains(cv::Point(a.x+a.width,a.y))) return true;
	if (b.contains(cv::Point(a.x,a.y+a.height))) return true;
	return false;
}
Code example #3
void LevelTracker::setRegion(const cv::Rect &roi)
{
    topLeft=roi.tl();
    bottonRight=roi.br();
    if(topLeft.x<bottonRight.x && topLeft.y<bottonRight.y)
        regOk=true;
}
Code example #4
File: Common.cpp  Project: einbrain/ProphetCursor
bool findTop(cv::Point& top, int& topVal, cv::Mat* src, cv::Rect rect){
	cv::Point res(0, 0);
	int x = 0, y = 0;
	bool bFound = false;

	topVal = 65535;

	if (!src->empty()){
		try
		{
			for (int i = rect.tl().y; i < rect.br().y; ++i){
				for (int j = rect.tl().x; j < rect.br().x; ++j){
					// Map the raw value (0..8191) to the 0..255 range and track the smallest positive result.
					Int16 curVarVec = Convert::ToInt16((src->at<Int16>(cv::Point(j, i))) * 255.0f / 8191.0f);
					if (curVarVec < topVal && curVarVec > 0)
					{
						topVal = curVarVec;
						x = j;
						y = i;
						bFound = true;
					}
				}
			}
		}
		catch(...)
		{
			// DO NOTHING
		}
	}
	//ht.nNose = src->at<Int16>(0,0);
	if(bFound) top = cv::Point(x,y);

	return bFound;
}
Code example #5
File: EvaluationRun.cpp  Project: brixen/Antrack
double overlapJaccard(cv::Rect& r1,cv::Rect& r2){
 
    double overlap=(r1 & r2).area();
    
    double overlapJaccard=overlap/(r1.area()+r2.area()-overlap);
    
    return overlapJaccard;
}
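As a quick check of the formula above (a hypothetical snippet, not from the original project): two 10x10 rectangles offset by 5 pixels in each direction share a 5x5 intersection, so the Jaccard overlap is 25 / (100 + 100 - 25) ≈ 0.143.

cv::Rect r1(0, 0, 10, 10);
cv::Rect r2(5, 5, 10, 10);
double iou = overlapJaccard(r1, r2);   // (r1 & r2).area() == 25, union == 175, iou ≈ 0.143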
Code example #6
inline static cv::Point center(const cv::Rect &rc)
{
	std::vector<cv::Point> pts;
	pts.push_back(rc.tl());
	pts.push_back(rc.br());

	return center(pts);
}
Code example #7
File: main.cpp  Project: huyunf/opencv
// A little subroutine to draw a box onto an image
//
void draw_box(cv::Mat& img, cv::Rect box) {
	cv::rectangle(
		img,
		box.tl(),
		box.br(),
		cv::Scalar(0x00, 0x00, 0xff) /* red */
		);
}
Code example #8
// Returns a similarity in [0, 1]; equals 0.5 when the differing area matches the common area.
inline float rect_similarity(const cv::Rect &r1, const cv::Rect &r2)
{
	float common = (r1 & r2).area();
	float different = (r1.area() + r2.area() - 2.0f * common);
	if (different > FLT_EPSILON)
		return std::min(0.5f * common / different, 1.0f);
	else
		return 1.0f;
}
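For example (hypothetical values, not from the original source): two 10x10 rectangles overlapping by half give common == 50 and different == 100, so the function returns 0.5 * 50 / 100 = 0.25, while identical rectangles fall into the else branch and return 1.0.

cv::Rect a(0, 0, 10, 10);
cv::Rect b(5, 0, 10, 10);
float s_half = rect_similarity(a, b);   // common = 50, different = 100 -> 0.25f
float s_same = rect_similarity(a, a);   // different = 0 -> 1.0f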
Code example #9
File: ofApp.cpp  Project: crx091081gb/still-breathing
void BlobPeople::update(const cv::Rect& track) {
    cur = toOf(track).getCenter();
    smooth.interpolate(cur, 0.5);
    height = track.tl().y-track.br().y; // note: negative in image coordinates, since br().y > tl().y
    bottom = track.br().y;
    left = track.tl().x;
    right = track.br().x;

    
}
Code example #10
File: rectangle.cpp  Project: apennisi/annotationtool
void Rectangle::set(const cv::Rect &_rect)
{
    rect = _rect;
    tl = _rect.tl();
    br = _rect.br();
    left->set(tl, cv::Point(tl.x, br.y));
    top->set(tl, cv::Point(br.x, tl.y));
    bottom->set(cv::Point(tl.x, br.y), cv::Point(br.x, br.y));
    right->set(cv::Point(br.x, tl.y), cv::Point(br.x, br.y));
    info.set(_rect);
}
Code example #11
File: helpers.hpp  Project: Amos-zq/4dface
/**
 * @brief Scales and translates a facebox. Useful for converting
 * between face boxes from different face detectors.
 *
 * To convert from V&J faceboxes to ibug faceboxes, use a scaling
 * of 0.85 and a translation_y of 0.2.
 * Ideally, we would learn the exact parameters from data.
 *
 * @param[in] facebox Input facebox.
 * @param[in] scaling The input facebox will be scaled by this factor.
 * @param[in] translation_y How much, in percent of the original facebox's width, the facebox will be translated in y direction. A positive value means facebox moves downwards.
 * @return The rescaled facebox.
 */
cv::Rect rescale_facebox(cv::Rect facebox, float scaling, float translation_y)
{
	// Assumes a square input facebox to work? (width==height)
	const auto new_width = facebox.width * scaling;
	const auto smaller_in_px = facebox.width - new_width;
	const auto new_tl = facebox.tl() + cv::Point2i(smaller_in_px / 2.0f, smaller_in_px / 2.0f);
	const auto new_br = facebox.br() - cv::Point2i(smaller_in_px / 2.0f, smaller_in_px / 2.0f);
	cv::Rect rescaled_facebox(new_tl, new_br);
	rescaled_facebox.y += facebox.width * translation_y;
	return rescaled_facebox;
}
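A minimal usage sketch of the conversion described in the documentation above (the variable names are illustrative, not from the original code): shrinking a Viola-Jones detection into an ibug-style facebox with a scaling of 0.85 and a translation_y of 0.2.

cv::Rect vj_facebox = detections[0];   // e.g. a detection returned by cv::CascadeClassifier
cv::Rect ibug_facebox = rescale_facebox(vj_facebox, 0.85f, 0.2f);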
Code example #12
File: main.cpp  Project: hgaiser/MasterThesis
float jaccardSimilarity(cv::Rect bbox1, cv::Rect bbox2) {
	cv::Vec4i bi(std::max(bbox1.x, bbox2.x), std::max(bbox1.y,bbox2.y), std::min(bbox1.br().x,bbox2.br().x), std::min(bbox1.br().y,bbox2.br().y));
	int iw = bi[2] - bi[0];
	int ih = bi[3] - bi[1];
	if (iw > 0 && ih > 0) {
		int un = (bbox1.br().x - bbox1.x) * (bbox1.br().y - bbox1.y) +
		(bbox2.br().x - bbox2.x) * (bbox2.br().y - bbox2.y) - iw * ih;
		return iw * ih / float(un);
	}

	return 0.f;
}
Code example #13
void CallBackFunc(int evnt, int x, int y, int flags, void* userdata) {
	if (evnt == cv::EVENT_LBUTTONDOWN) {
		mouseButtonDown = true;
		targetSelected = false;
		boundingRect = cv::Rect(0,0,0,0);
		point1 = cv::Point(x,y);
		cv::destroyWindow(targetName);
		cv::destroyWindow(ColorTracker.getColorSquareWindowName());
		targetImage.release();
	}
	if (evnt == cv::EVENT_MOUSEMOVE) {
		if (x < 0) x = 0;
		else if (x > image.cols) x = image.cols;
		if (y < 0) y = 0;
		else if (y > image.rows) y = image.rows;
		point2 = cv::Point(x,y);
		if (mouseButtonDown) {
			boundingRect = cv::Rect(point1,point2);
		}
		cv::imshow(imageName,image);
	}
	if (evnt == cv::EVENT_LBUTTONUP) {
		mouseButtonDown = false;
		if (boundingRect.area() != 0) {
			targetImage = image(calibratedRect(boundingRect));
			cv::imshow(targetName, targetImage);
		}
		else {
			boundingRect = cv::Rect(point1-cv::Point(5,5),point1+cv::Point(5,5));
			targetImage = image(calibratedRect(boundingRect));
			cv::imshow(targetName, targetImage);
		}
		targetSelected = true;
    }
}
Code example #14
void drawQuadrants(cv::Point targetCoord) {
	cv::line(image, cv::Point(0, centerPoint.y), cv::Point(frameWidth, centerPoint.y), blackColor);
	cv::line(image, cv::Point(centerPoint.x, 0), cv::Point(centerPoint.x, frameHeight), blackColor);
	if (targetCoord != cv::Point(999,999)) {
		if (centerRectangle.contains(targetCoord)) {
			targetCentered = true;
			targetInQ1 = targetInQ2 = targetInQ3 = targetInQ4 = false;
			highlightAllQuadrants();
		}
		else if (targetCoord.x >= centerPoint.x) {
			if (targetCoord.y <= centerPoint.y) {
				targetInQ1 = true;
				targetInQ2 = targetInQ3 = targetInQ4 = targetCentered = false;
				highlightQuadrant(1);
			}
			else if (targetCoord.y > centerPoint.y) {
				targetInQ4 = true;
				targetInQ1 = targetInQ2 = targetInQ3 = targetCentered = false;
				highlightQuadrant(4);
			}
		}
		else if (targetCoord.x < centerPoint.x) {
			if (targetCoord.y <= centerPoint.y) {
				targetInQ2 = true;
				targetInQ1 = targetInQ3 = targetInQ4 = targetCentered = false;
				highlightQuadrant(2);
			}
			else if (targetCoord.y > centerPoint.y) {
				targetInQ3 = true;
				targetInQ1 = targetInQ2 = targetInQ4 = targetCentered = false;
				highlightQuadrant(3);
			}
		}
	}
}
Code example #15
File: Target.cpp  Project: chenyq123/3516a_sample
bool Target::init(KVConfig *cfg, int id, const cv::Rect &roi, const cv::Mat &curr_gray,
    double stamp, double min_dis_5frames, int min_pts, int max_feature_pts, double qualitylevel)
{
    first_rc_ = roi;
    outer_.x = 0, outer_.y = 0, outer_.width = curr_gray.cols, outer_.height = curr_gray.rows;
    stamp_ = stamp;
    cfg_ = cfg;
    id_ = id;
    min_dis_5frames_ = min_dis_5frames;
    min_pts_ = min_pts;

    stopped_dis_ = atof(cfg->get_value("pd_target_stopped_dis", "2.0"));

    //int max_features_pts = atoi(cfg->get_value("pd_max_features_pts", "300"));
    int max_features_pts = 200;

    PTS pts;
    //cv::goodFeaturesToTrack(curr_gray(roi), pts, max_feature_pts, qualitylevel, 1.5);
    hi_goodFeaturesToTrack(curr_gray(roi), pts, 200, qualitylevel, 1.5);

    if ((int)pts.size() < min_pts_) {
        return false;
    }

    l2g(pts, roi.tl());

    layers_.push_back(pts);

    brc_ = roi;
    last_rc_ = cv::boundingRect(pts);

    return true;
}
Code example #16
File: TrackFace.cpp  Project: duguyue100/TrackFace
void TrackFace::drawFace(cv::Mat & frame, cv::Rect & face_n, string box_text)
{
    rectangle(frame, face_n, CV_RGB(0,255,0),1);
    int pos_x=std::max(face_n.tl().x-10, 0);
    int pos_y=std::max(face_n.tl().y-10, 0);
    putText(frame, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0),2.0);
}
Code example #17
File: utils_train.cpp  Project: spideryan/FaceX
vector<cv::Point2d> MapShape(cv::Rect original_face_rect,
	const vector<cv::Point2d> original_landmarks, cv::Rect new_face_rect)
{
	vector<cv::Point2d> result;
	for (const cv::Point2d &landmark: original_landmarks)
	{
		result.push_back(landmark);
		result.back() -= static_cast<cv::Point2d>(original_face_rect.tl());
		result.back().x *= 
			static_cast<double>(new_face_rect.width) / original_face_rect.width;
		result.back().y *= 
			static_cast<double>(new_face_rect.height) / original_face_rect.height;
		result.back() += static_cast<cv::Point2d>(new_face_rect.tl());
	}
	return result;
}
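A hypothetical usage sketch (the boxes and landmark values are made up): re-expressing landmarks given relative to a detected face box in the coordinate frame of a normalized 128x128 box.

cv::Rect detected_box(40, 60, 90, 90);
cv::Rect normalized_box(0, 0, 128, 128);
std::vector<cv::Point2d> landmarks = { {60.0, 80.0}, {100.0, 80.0}, {80.0, 120.0} };  // inside detected_box
std::vector<cv::Point2d> mapped = MapShape(detected_box, landmarks, normalized_box);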
Code example #18
File: Map2DRender.cpp  Project: OpenGelo/Map2DFusion
    void prepare(cv::Rect dst_roi)
    {
        using namespace cv;
        dst_roi_final_ = dst_roi;

        // Crop unnecessary bands
        double max_len = static_cast<double>(max(dst_roi.width, dst_roi.height));
        num_bands_ = min(actual_num_bands_, static_cast<int>(ceil(log(max_len) / log(2.0))));

        // Add border to the final image, to ensure sizes are divided by (1 << num_bands_)
        dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);
        dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);

        Blender::prepare(dst_roi);

        dst_pyr_laplace_.resize(num_bands_ + 1);
        dst_pyr_laplace_[0] = dst_;

        dst_band_weights_.resize(num_bands_ + 1);
        dst_band_weights_[0].create(dst_roi.size(), weight_type_);
        dst_band_weights_[0].setTo(0);

        for (int i = 1; i <= num_bands_; ++i)
        {
            dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2,
                                       (dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
            dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,
                                        (dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
            dst_pyr_laplace_[i].setTo(Scalar::all(0));
            dst_band_weights_[i].setTo(0);
        }
    }
Code example #19
File: Algorithm.cpp  Project: pbimage/Algorithm
inline void CalcVarianceAndSD(cv::Rect &block, cv::Mat &sum, cv::Mat &sqsum, double &mean, double &stdvar)
{	
	double brs = sum.at<int>(block.y+block.height,block.x+block.width);			// D
	double bls = sum.at<int>(block.y+block.height,block.x);						// C
	double trs = sum.at<int>(block.y,block.x+block.width);						// B
	double tls = sum.at<int>(block.y,block.x);									// A
	double brsq = sqsum.at<double>(block.y+block.height,block.x+block.width);	// D
	double blsq = sqsum.at<double>(block.y+block.height,block.x);				// C
	double trsq = sqsum.at<double>(block.y,block.x+block.width);				// B
	double tlsq = sqsum.at<double>(block.y,block.x);							// A
	mean = (brs + tls-trs-bls)/((double)block.area() + 1);				// D + A - B - C
	double sqmean = (brsq+tlsq-trsq-blsq)/((double)block.area() + 1);	// D + A - B - C
	stdvar = sqrt(sqmean - mean * mean);

	return;
}
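A minimal usage sketch (assumptions: an 8-bit grayscale input and a block that lies fully inside it; the file name is made up). cv::integral with its default output depths produces a CV_32S sum and a CV_64F squared sum, which matches the at<int>/at<double> accesses above.

cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
cv::Mat sum, sqsum;
cv::integral(gray, sum, sqsum);      // sum: CV_32S, sqsum: CV_64F, both one row/column larger than gray
cv::Rect block(10, 10, 32, 32);      // must lie fully inside gray
double mean = 0.0, stdvar = 0.0;
CalcVarianceAndSD(block, sum, sqsum, mean, stdvar);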
Code example #20
//RETURNS THE TOP-LEFT CORNER OF THE REFLECTION OF sourceTemplate ON image
cv::Rect findBestMatchLocation( const cv::Mat& image, const cv::Rect& source_rect, 
        double* nsigma, const cv::Mat& mask )
{
    cv::Mat image_gray;
    cvtColor( image, image_gray, CV_RGB2GRAY, 1 );
    cv::Mat image_copy = image_gray.clone();
    
    // Create template.
    cv::Mat image_template_copy = image_gray.clone();
    cv::Mat sourceTemplate = image_template_copy( source_rect );
    flip( sourceTemplate, sourceTemplate, 0 );

    // Creates results matrix where the top left corner of the 
    // template is slid across each pixel of the source
    int result_cols = image.cols-sourceTemplate.cols+1;
    int result_rows = image.rows-sourceTemplate.rows+1;
    cv::Mat result;
    result.create( result_rows, result_cols, CV_32FC1 );

    // Mask image to match only in selected ROI.
    if( !mask.empty() )
    {
        cv::Mat tmp;
		image_copy.copyTo( tmp, mask );
        image_copy = tmp;
    }

    //0:CV_TM_SQDIFF
    //1:CV_TM_SQDIFF_NORMED
    //2:CV_TM_CCORR
    //3:CV_TM_CCORR_NORMED
    //4:CV_TM_CCOEFF
    //5:CV_TM_CCOEFF_NORMED <---- Most successful at finding reflections

    int match_method = CV_TM_CCOEFF_NORMED; // 4 seemed good for stddev thresholding.

    //matchTemplate( masked_scene, sourceTemplate, result, match_method );
    matchTemplate( image_gray, sourceTemplate, result, match_method );

    double minVal, maxVal; 
    cv::Point minLoc, maxLoc, matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat() );
     
    if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
    {
        matchLoc = minLoc;	
    }
    else 
    {
        matchLoc = maxLoc;
    }
    cv::Scalar mean, stddev;
    meanStdDev( result, mean, stddev, cv::Mat() );
    *nsigma = ( maxVal-mean[0] )/ stddev[0];
    // matchLoc is the location of the top left corner of the reflection
    // that matchTemplate found

    return cv::Rect( matchLoc, source_rect.size() );
}
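A hypothetical call site (the image path and ROI are made up): search the whole frame for the vertically flipped reflection of a selected region, with the match strength returned through nsigma.

cv::Mat frame = cv::imread("scene.png");          // 3-channel BGR image
cv::Rect source_rect(120, 40, 64, 64);            // region whose reflection we look for
double nsigma = 0.0;
cv::Rect match = findBestMatchLocation(frame, source_rect, &nsigma, cv::Mat());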
Code example #21
File: MotionDetector.cpp  Project: anhDean/PBAS-
bool MotionDetector::isInMotion(cv::Rect boundingBox, float interSection)
{
	if (cv::sum(m_motionMap(boundingBox))[0] > interSection * boundingBox.area())
		return true;
	else
		return false;

}
Code example #22
    bool
ARC_Pair::set_reflection ( cv::Mat img, cv::Rect r, cv::Size s )
{
    roi.reflection = convert_to_point( r, img, s );
    if( roi.reflection==cv::Point( -1, -1 ) )
        roi.reflection=r.tl()-0.5*cv::Point(s);
    return true;
}		/* -----  end of method ARC_Pair::set_reflection  ----- */
Code example #23
std::pair<double, cv::Point> DescribeMaximum(const cv::Mat &surface,
                                             const cv::Rect &boundingBox) {
  // assert surface double
  cv::Point position = boundingBox.tl();
  cv::Point end = boundingBox.br();  // exclusive bottom-right corner
  double value = surface.at<double>(position);
  for (int y = boundingBox.y; y < end.y; ++y) {
    for (int x = boundingBox.x; x < end.x; ++x) {
      if (surface.at<double>(y, x) > value) {
        value = surface.at<double>(y, x);
        position = cv::Point(x, y);
      }
    }
  }
  return std::make_pair(value, position);
}
Code example #24
File: face.cpp  Project: darrikmazey/libgesture
Face::Face(cv::Rect &r)
{
	cv::Point tl = r.tl();
	m_center = cv::Point(tl.x + r.width/2, tl.y + r.height/2);
	m_width = r.width;
	m_height = r.height;
	m_weight = 1;
}
Code example #25
bool EndToEndTest::rectMatches(cv::Rect actualPlate, PlateRegion candidate)
{
  // Determine if this region matches our plate in the image
  // Do this simply by verifying that the center point of the plate is within the region
  // And that the plate region is not x% larger or smaller

  const float MAX_SIZE_PERCENT_LARGER = 0.65;

  //int plateCenterX = actualPlate.x + (int) (((float) actualPlate.width) / 2.0);
  //int plateCenterY = actualPlate.y + (int) (((float) actualPlate.height) / 2.0);
  //Point centerPoint(plateCenterX, plateCenterY);
  
  vector<Point> requiredPoints;
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.2),
				   actualPlate.y + (int) (((float) actualPlate.height) * 0.15)
			   ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.8),
				   actualPlate.y + (int) (((float) actualPlate.height) * 0.15)
			  ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.2),
				  actualPlate.y + (int) (((float) actualPlate.height) * 0.85)
			  ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.8),
				actualPlate.y + (int) (((float) actualPlate.height) * 0.85)
			));
  

  float sizeDiff = 1.0 - ((float) actualPlate.area()) / ((float) candidate.rect.area());

  //cout << "Candidate: " << candidate.rect.x << "," << candidate.rect.y << " " << candidate.rect.width << "-" << candidate.rect.height << endl;
  //cout << "Actual:    " << actualPlate.x << "," << actualPlate.y << " " << actualPlate.width << "-" << actualPlate.height << endl;
  
  //cout << "size diff: " << sizeDiff << endl;
  
  bool hasAllPoints = true;
  for (size_t z = 0; z < requiredPoints.size(); z++)
  {
    if (candidate.rect.contains(requiredPoints[z]) == false)
    {
      hasAllPoints = false;
      break;
    }
  }

  if ( hasAllPoints &&
    (sizeDiff < MAX_SIZE_PERCENT_LARGER) )
  {
    return true;
  }
  else
  {
    for (int i = 0; i < candidate.children.size(); i++)
    {
      if (rectMatches(actualPlate, candidate.children[i]))
	return true;
    }
  }
  
  return false;
}
Code example #26
File: MotionDetector.cpp  Project: anhDean/PBAS-
bool MotionDetector::containsBoundingBox(cv::Rect outer, cv::Rect inner)
{
	float intersectionParam = 0.45;
	cv::Rect intersect = outer & inner;
	if (intersect.area() >= intersectionParam * inner.area())
		return true;
	else
		return false;
}
Code example #27
// === FUNCTION ======================================================================
// Name: get_masked_frame
// Description: Masks the frame based on slope and roi. Mask returned by pointer.
// =====================================================================================
cv::Mat get_masked_frame ( cv::Rect roi, double slope, cv::Mat* frame, cv::Mat* mask )
{
    cv::Point corners[1][4];
    //Set the frame
    *mask=cv::Mat::zeros( frame->size(), CV_8UC1 );
    cv::Mat masked_frame;
    if( slope==0 )
    {
        // TODO: Could use direction handling here.
        corners[0][0] = roi.br();
        corners[0][1] = cv::Point( frame->cols, roi.y+roi.height );
        corners[0][2] = corners[0][1]-cv::Point( 0, roi.height );
        corners[0][3] = corners[0][0]-cv::Point( 0, roi.height );
    }
    else if( isinf( slope ) )
    {
        corners[0][0] = roi.tl();
        corners[0][1] = cv::Point( roi.x, 0 );
        corners[0][2] = corners[0][1]+cv::Point( roi.width, 0);
        corners[0][3] = corners[0][0]+cv::Point( roi.width, 0 );
    }
    else
    {
        corners[0][0].x = ( int ) ( (frame->rows + slope*roi.x-roi.y)/slope );
        corners[0][0].y = frame->rows;
        corners[0][1] = cv::Point( ( int )( (-roi.y + slope * roi.x ) / slope ), 0 );
        corners[0][2] = corners[0][1] + cv::Point(roi.width, 0);
        corners[0][3] = corners[0][0] + cv::Point(roi.width, 0);
    }

    // This is weird, but follows OpenCV docs.
    const cv::Point* ppt[1] = { corners[0] };
    const int npt[] = { 4 };

    fillPoly( *mask, ppt, npt, 1, 255 );
    frame->copyTo(masked_frame, *mask);
    return masked_frame;
}	// ----- end of function get_masked_frame ----- 
Code example #28
/** determine if a rectangle contains "blank" pixels **/
bool Cropper::rectHasBlankPixels(const cv::Rect &roi) {
    Point2i tr(roi.x + roi.width - 1, roi.y);
    Point2i bl(roi.x, roi.y + roi.height - 1);
    Point2i br(roi.x + roi.width - 1, roi.y + roi.height - 1);
    
    if(lineHasBlankPixels(roi.tl(), tr))
        return true;
    
    if(lineHasBlankPixels(tr, br))
       return true;
    
    if(lineHasBlankPixels(bl, br))
        return true;
    
    if(lineHasBlankPixels(roi.tl(), bl))
        return true;
    
    return false;
}
Code example #29
File: main.cpp  Project: Pandhariix/Tracking
bool fusionRects(cv::Rect prevRectK, cv::Rect rectK)
{
    if(prevRectK.area() == 0)
        return false;

    if(rectK.width > 1.8*prevRectK.width)
        return true;
    else
        return false;
}
Code example #30
    Point computeIntersect(cv::Vec4i a, cv::Vec4i b, cv::Rect ROI)
    {
        int x1 = a[0], y1 = a[1], x2 = a[2], y2 = a[3];
        int x3 = b[0], y3 = b[1], x4 = b[2], y4 = b[3];

        Point p1 = Point(x1,y1);
        Point p2 = Point(x2,y2);
        Point p3 = Point(x3,y3);
        Point p4 = Point(x4,y4);

        if( !ROI.contains(p1) || !ROI.contains(p2)
                || !ROI.contains(p3) || !ROI.contains(p4) )
            return Point(-1,-1);

        Point vec1 = p1-p2;
        Point vec2 = p3-p4;

        float vec1_norm2 = vec1.x*vec1.x + vec1.y*vec1.y;
        float vec2_norm2 = vec2.x*vec2.x + vec2.y*vec2.y;
        float cosTheta = (vec1.dot(vec2))/sqrt(vec1_norm2*vec2_norm2);

        float den = ((float)(x1-x2) * (y3-y4)) - ((y1-y2) * (x3-x4));
        if(den != 0)
        {
            cv::Point2f pt;
            pt.x = ((x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4)) / den;
            pt.y = ((x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4)) / den;

            if( !ROI.contains(pt) )
                return Point(-1,-1);

            // no-confidence metric
            float d1 = MIN( dist2(p1,pt), dist2(p2,pt) )/vec1_norm2;
            float d2 = MIN( dist2(p3,pt), dist2(p4,pt) )/vec2_norm2;

            float no_confidence_metric = MAX(sqrt(d1),sqrt(d2));

            if( no_confidence_metric < 0.5 && cosTheta < 0.707 )
                return Point(int(pt.x+0.5), int(pt.y+0.5));
        }

        return cv::Point(-1, -1);
    }