Example #1
double overlapJaccard(cv::Rect& r1, cv::Rect& r2){
    double overlap = (r1 & r2).area();
    double overlapJaccard = overlap / (r1.area() + r2.area() - overlap);
    return overlapJaccard;
}
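A quick usage sketch for the helper above; the rectangle values are arbitrary. An IoU of 1.0 means identical rectangles and 0.0 means no overlap (the division is undefined if both rectangles are empty).
// Illustrative call to overlapJaccard(); rectangle values are made up.
cv::Rect a(10, 10, 100, 50);
cv::Rect b(40, 20, 100, 50);
double iou = overlapJaccard(a, b);   // ~0.39 for these two boxes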
Example #2
//[0; 1] (0.5 when different_area == common_area)
inline float rect_similarity(const cv::Rect &r1, const cv::Rect &r2)
{
	float common = (r1 & r2).area();
	float different = (r1.area() + r2.area() - 2.0f * common);
	if (different > FLT_EPSILON)
		return std::min(0.5f * common / different, 1.0f);
	else
		return 1.0f;
}
Example #3
inline void CalcVarianceAndSD(cv::Rect &block, cv::Mat &sum, cv::Mat &sqsum, double &mean, double &stdvar)
{
	// Corner lookups in the integral images (sum: CV_32S, sqsum: CV_64F, as produced by cv::integral).
	double brs = sum.at<int>(block.y+block.height,block.x+block.width);			// D
	double bls = sum.at<int>(block.y+block.height,block.x);						// C
	double trs = sum.at<int>(block.y,block.x+block.width);						// B
	double tls = sum.at<int>(block.y,block.x);									// A
	double brsq = sqsum.at<double>(block.y+block.height,block.x+block.width);	// D
	double blsq = sqsum.at<double>(block.y+block.height,block.x);				// C
	double trsq = sqsum.at<double>(block.y,block.x+block.width);				// B
	double tlsq = sqsum.at<double>(block.y,block.x);							// A
	double area = block.area() > 0 ? (double)block.area() : 1.0;		// guard against an empty block
	mean = (brs + tls - trs - bls) / area;								// D + A - B - C
	double sqmean = (brsq + tlsq - trsq - blsq) / area;					// D + A - B - C
	double variance = sqmean - mean * mean;
	stdvar = sqrt(variance > 0.0 ? variance : 0.0);						// clamp tiny negatives from rounding
}
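A sketch of how the integral images consumed above are typically produced. With an 8-bit input, cv::integral yields a CV_32S sum and a CV_64F squared sum, which matches the at<int>/at<double> accesses in CalcVarianceAndSD; the file name and block are placeholders.
// Hypothetical caller for CalcVarianceAndSD(); the image path and block are illustrative.
cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
cv::Mat sum, sqsum;
cv::integral(gray, sum, sqsum);          // sum: CV_32S, sqsum: CV_64F for 8-bit input
cv::Rect block(32, 32, 64, 64);          // must lie fully inside the image
double mean = 0.0, stdev = 0.0;
CalcVarianceAndSD(block, sum, sqsum, mean, stdev);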
void CallBackFunc(int evnt, int x, int y, int flags, void* userdata) {
	if (evnt == cv::EVENT_LBUTTONDOWN) {
		mouseButtonDown = true;
		targetSelected = false;
		boundingRect = cv::Rect(0,0,0,0);
		point1 = cv::Point(x,y);
		cv::destroyWindow(targetName);
		cv::destroyWindow(ColorTracker.getColorSquareWindowName());
		targetImage.release();
	}
	if (evnt == cv::EVENT_MOUSEMOVE) {
		if (x < 0) x = 0;
		else if (x > image.cols) x = image.cols;
		if (y < 0) y = 0;
		else if (y > image.rows) y = image.rows;
		point2 = cv::Point(x,y);
		if (mouseButtonDown) {
			boundingRect = cv::Rect(point1,point2);
		}
		cv::imshow(imageName,image);
	}
	if (evnt == cv::EVENT_LBUTTONUP) {
		mouseButtonDown = false;
		if (boundingRect.area() == 0) {
			// Degenerate click: fall back to a small box around the first point.
			boundingRect = cv::Rect(point1-cv::Point(5,5),point1+cv::Point(5,5));
		}
		targetImage = image(calibratedRect(boundingRect));
		cv::imshow(targetName, targetImage);
		targetSelected = true;
	}
}
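A sketch of how a callback like this is usually registered; it assumes the globals referenced above (image, imageName, and so on) are already defined in the same translation unit.
// Hypothetical registration of CallBackFunc(); assumes the globals used above exist.
cv::namedWindow(imageName);
cv::setMouseCallback(imageName, CallBackFunc, nullptr);
cv::imshow(imageName, image);
cv::waitKey(0);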
Example #5
bool MotionDetector::isInMotion(cv::Rect boundingBox, float interSection)
{
	// True when the summed motion-map response inside the box exceeds the given fraction of its area.
	return cv::sum(m_motionMap(boundingBox))[0] > interSection * boundingBox.area();
}
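isInMotion() compares the sum of the motion map inside the box against a fraction of the box area, which only reads as a ratio if the map holds 0/1 values. A minimal sketch of building such a map under that assumption (the capture source and threshold value are illustrative):
// Hypothetical construction of a 0/1 motion map; not taken from the original project.
cv::VideoCapture cap(0);                       // any frame source will do
cv::Mat prevFrame, currFrame, prevGray, currGray, diff, motionMap;
cap >> prevFrame;
cap >> currFrame;
cv::cvtColor(prevFrame, prevGray, cv::COLOR_BGR2GRAY);
cv::cvtColor(currFrame, currGray, cv::COLOR_BGR2GRAY);
cv::absdiff(prevGray, currGray, diff);
cv::threshold(diff, motionMap, 25, 1, cv::THRESH_BINARY);   // 1 where motion, 0 elsewhere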
Example #6
bool EndToEndTest::rectMatches(cv::Rect actualPlate, PlateRegion candidate)
{
  // Determine if this region matches our plate in the image
  // Do this simply by verifying that the center point of the plate is within the region
  // And that the plate region is not x% larger or smaller

  const float MAX_SIZE_PERCENT_LARGER = 0.65;

  //int plateCenterX = actualPlate.x + (int) (((float) actualPlate.width) / 2.0);
  //int plateCenterY = actualPlate.y + (int) (((float) actualPlate.height) / 2.0);
  //Point centerPoint(plateCenterX, plateCenterY);
  
  vector<Point> requiredPoints;
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.2),
				   actualPlate.y + (int) (((float) actualPlate.height) * 0.15)
			   ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.8),
				   actualPlate.y + (int) (((float) actualPlate.height) * 0.15)
			  ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.2),
				  actualPlate.y + (int) (((float) actualPlate.height) * 0.85)
			  ));
  requiredPoints.push_back(Point( actualPlate.x + (int) (((float) actualPlate.width) * 0.8),
				actualPlate.y + (int) (((float) actualPlate.height) * 0.85)
			));
  

  float sizeDiff = 1.0 - ((float) actualPlate.area()) / ((float) candidate.rect.area());

  //cout << "Candidate: " << candidate.rect.x << "," << candidate.rect.y << " " << candidate.rect.width << "-" << candidate.rect.height << endl;
  //cout << "Actual:    " << actualPlate.x << "," << actualPlate.y << " " << actualPlate.width << "-" << actualPlate.height << endl;
  
  //cout << "size diff: " << sizeDiff << endl;
  
  bool hasAllPoints = true;
  for (int z = 0; z < requiredPoints.size(); z++)
  {
    if (candidate.rect.contains(requiredPoints[z]) == false)
    {
      hasAllPoints = false;
      break;
    }
  }
  if ( hasAllPoints && 
    (sizeDiff < MAX_SIZE_PERCENT_LARGER) )

  {
    return true;
  }
  else
  {
    for (int i = 0; i < candidate.children.size(); i++)
    {
      if (rectMatches(actualPlate, candidate.children[i]))
	return true;
    }
  }
  
  return false;
}
Example #7
bool MotionDetector::containsBoundingBox(cv::Rect outer, cv::Rect inner)
{
	float intersectionParam = 0.45f;
	cv::Rect intersect = outer & inner;
	return intersect.area() >= intersectionParam * inner.area();
}
Example #8
bool fusionRects(cv::Rect prevRectK, cv::Rect rectK)
{
    if(prevRectK.area() == 0)
        return false;

    if(rectK.width > 1.8*prevRectK.width)
        return true;
    else
        return false;
}
Example #9
region_descriptor create_rectangle(cv::Rect rect)
{
	region_descriptor result;
	for(int i = 0; i < rect.height; ++i)
		result.lineIntervals.push_back(region_interval(i+rect.y, rect.x, rect.x + rect.width));

	result.bounding_box = rect;
	result.m_size = rect.area();

	return result;
}
void ImageSegmentation::thresholding(const cv::Mat &grey, cv::Mat &bin, const double threshold, const int method, const cv::Rect roi)
{
  if(roi.area() == 0)
  {
    cv::threshold(grey, bin, threshold, 255, method);
  }
  else
  {
    cv::threshold(grey(roi), bin, threshold, 255, method);
  }
}
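For reference, the two branches above are equivalent to calling cv::threshold directly on either the full image or an ROI view; the threshold value and ROI below are illustrative.
// Equivalent direct calls (illustrative threshold value and ROI).
cv::Mat grey = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat bin;
cv::threshold(grey, bin, 128, 255, cv::THRESH_BINARY);          // whole image
cv::Rect roi(10, 10, 100, 100);
cv::threshold(grey(roi), bin, 128, 255, cv::THRESH_BINARY);     // ROI only; bin is ROI-sized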
Example #11
void Detector::set_state(cv::Rect state, float confidence) {
  CHECK(0.0f <= confidence && confidence <= 1.0f);
  if (!IsRectInsideFrame(state, frame_)) {
    WARNING("given state " << state << " is outside the frame");
  } else if (state.area() == 0) {
    WARNING("given state is empty");
  }

  state_ = state;
  confidence_ = confidence;
}
Example #12
    bool getBox()
    {
        // Crops the image based on user input and creates a template for the tracker with it.
        printf("Reading image!!\n");
        ImageOf<PixelRgb> *imgIn = imInPort.read();  // read an image
        cv::Mat img((IplImage*) imgIn->getIplImage());	   
     
        printf("Click first top left and then bottom right from the object !!\n");
        bool boxOK = false;
        //Bottle &out  = coordsOutPort.prepare();
        cv::Point tl, br;

        while (!boxOK){
            printf("Click on top left!\n");
            Bottle *point1 = coordsInPort.read(true);
            tl.x =  point1->get(0).asDouble();
            tl.y =  point1->get(1).asDouble();
            printf("Point read at %d, %d!!\n", tl.x, tl.y);

            printf("Click on bottom right!\n");
            Bottle *point2 = coordsInPort.read(true);            
            br.x =  point2->get(0).asDouble();
            br.y =  point2->get(1).asDouble();
            printf("Point read at %d, %d!!\n", br.x, br.y);

            BBox = cv::Rect(tl,br);
            if (BBox.area() > 20) {
                printf("valid coordinates, cropping image!\n");
                boxOK = true;
            }
            else {
                printf("Coordinates not valid, click again!\n");
            }
        }

        printf("Prep out mat !!\n");
        ImageOf<PixelRgb> &templateOut  = tempOutPort.prepare();
        templateOut.resize(BBox.width, BBox.height);
        cv::Mat tempOut((IplImage*)templateOut.getIplImage(),false);
        img(BBox).copyTo(tempOut);
        //cv::GaussianBlur(img(BBox), imOut, cv::Size(1,1), 1, 1);

        double t0 = Time::now();
        while(Time::now()-t0 < 1) {  //send the template for one second
            printf("Writing Template!\n");
            tempOutPort.write();
            Time::delay(0.1);
        }

        tracking = true;
        return true;
    }
Example #13
void DrawCircles(Mat& frame, std::vector<Rect>& hands, cv::Rect& maxRect, int& posX, int& posY)
{
	// Find the largest detected hand, then mark it with an ellipse and a centre dot
	for (size_t i = 0; i < hands.size(); i++)
	{
		if (hands[i].area() > maxRect.area())
			maxRect = hands[i];
	}
	Point center(maxRect.x + maxRect.width*0.5, maxRect.y + maxRect.height*0.5);
	ellipse(frame, center, Size(maxRect.width*0.5, maxRect.height*0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
	circle(frame, center, 5, Scalar(144, 144, 255), 3);
	posX = maxRect.x + maxRect.width*0.5;
	posY = maxRect.y + maxRect.height*0.5;

}
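A sketch of one way to obtain the hands vector expected by DrawCircles(), using a cascade detector; the cascade file name and input image are placeholders.
// Hypothetical caller; "hand_cascade.xml" and "frame.png" are placeholders.
cv::CascadeClassifier cascade;
cascade.load("hand_cascade.xml");
cv::Mat frame = cv::imread("frame.png");
cv::Mat gray;
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
std::vector<cv::Rect> hands;
cascade.detectMultiScale(gray, hands);
cv::Rect maxRect;                 // starts empty so the largest detection wins
int posX = 0, posY = 0;
DrawCircles(frame, hands, maxRect, posX, posY);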
Example #14
FrameDescriptor::FrameDescriptor(int numberOfFeatures, cv::Rect roi_rect) : numberOfFeatures(numberOfFeatures), roi_rect(roi_rect),frame_number(0) {

    //Allocate KeyPoint Vector
    featurePoints.reserve(numberOfFeatures);

    //if we have an roi to generate, set the flag, otherwise, set process frame to point to refFrame
    has_been_normalized = false;
    if(roi_rect.area() > 0) {
        roi_set = true;
        roi_offset = cv::Point2f(roi_rect.x,roi_rect.y);
        process_frame = new cv::Mat();
    } else {
        roi_set = false;
        roi_offset = cv::Point2f(0,0);
        process_frame = &refFrame;
    }

}
Example #15
void FaceProcessor::enrollImagePart(const cv::Mat &rgbImage, double &resRed, double &resGreen, double &resBlue, double &resT, cv::Rect roirect)
{
    if(roirect == cv::Rect()) {
        roirect = cv::Rect(0,0,rgbImage.cols,rgbImage.rows);
    } else {
        roirect = roirect & cv::Rect(0,0,rgbImage.cols,rgbImage.rows);
    }
    unsigned long red = 0;
    unsigned long green = 0;
    unsigned long blue = 0;
    unsigned long area = 0;
    if(roirect.area() > 0) {
        cv::Mat region = cv::Mat(rgbImage,roirect);
        unsigned char *ptr;
        unsigned char tR = 0, tG = 0, tB = 0;
        #pragma omp parallel for private(ptr,tB,tG,tR) reduction(+:area,red,green,blue)
        for(int j = 0; j < roirect.height; j++) {
            ptr = region.ptr(j);
            for(int i = 0; i < roirect.width; i++) {
                tB = ptr[3*i];
                tG = ptr[3*i+1];
                tR = ptr[3*i+2];
                if( /*__skinColor(tR, tG, tB)*/ true) {
                    area++;
                    red   += tR;
                    green += tG;
                    blue  += tB;
                }
            }
        }
    }

    resT = ((double)cv::getTickCount() -  (double)m_markTime)*1000.0 / cv::getTickFrequency();
    m_markTime = cv::getTickCount();
    if(area > 16) {
        resRed = ((double)red) / area;
        resGreen = ((double)green) / area;
        resBlue = ((double)blue) / area;
    } else {
        resRed = 0.0;
        resGreen = 0.0;
        resBlue = 0.0;
    }
}
Example #16
 bool rectHasLargerArea(cv::Rect a, cv::Rect b) { return a.area() < b.area(); };  // comparator: true when a is the smaller rect, so std::sort orders by ascending area
 static PixelType mean_from_integral(const cv::Mat& mat, cv::Rect region)
 {
     return sum_from_integral<PixelType>(mat, region) / region.area();
 }
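sum_from_integral is not shown above; a standalone sketch of what it presumably does, using the same D + A - B - C corner lookup as CalcVarianceAndSD in Example #3. It assumes the matrix is an integral image one pixel larger than the source, as produced by cv::integral; the template form here is an assumption, since the original PixelType lives in a class that is not shown.
// Assumed implementation sketch of sum_from_integral(); not taken from the original project.
template <typename PixelType>
static PixelType sum_from_integral(const cv::Mat& mat, cv::Rect region)
{
    const PixelType A = mat.at<PixelType>(region.y, region.x);
    const PixelType B = mat.at<PixelType>(region.y, region.x + region.width);
    const PixelType C = mat.at<PixelType>(region.y + region.height, region.x);
    const PixelType D = mat.at<PixelType>(region.y + region.height, region.x + region.width);
    return D + A - B - C;
}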
Example #18
cv::Mat_<byte> DepthSegmenter::calcMostCommonDepthMask( const cv::Rect subRect, cv::Mat_<float> mask) const
{
    cv::Mat_<float> subMat = _rngMat;
    if ( subRect.area() > 0)
        subMat = _rngMat(subRect);
    if ( mask.empty())
        mask = cv::Mat_<float>::ones(subMat.size());
    assert( mask.rows == subMat.rows && mask.cols == subMat.cols);

    // Find the most common depth 
    int *bins = (int*)calloc( _depthLevels, sizeof(int));   // Zero'd
    float *means = (float*)calloc( _depthLevels, sizeof(float));    // Zero'd within bin depth means
    std::vector< std::vector<float> > dvals( _depthLevels);

    const double rngDelta = _maxRng - _minRng;

    int topIdx = 0; // Remember top index (most hits)
    int maxCnt = 0;
    const int rows = subMat.rows;
    const int cols = subMat.cols;
    for ( int i = 0; i < rows; ++i)
    {
        const float *pxRow = subMat.ptr<float>(i);
        const float *maskRow = mask.ptr<float>(i);

        for ( int j = 0; j < cols; ++j)
        {
            if ( maskRow[j])    // Ignore zero values
            {
                const float depth = pxRow[j];
                if ( depth <= 0)
                    continue;

                const int b = binVal( bins, means, depth, rngDelta);
                if ( b < 0) // Out of range so continue
                    continue;

                dvals[b].push_back(depth);  // Store the depth value itself for later std-dev calc
                if ( bins[b] > maxCnt)
                {
                    topIdx = b;
                    maxCnt = bins[b];
                }   // end if
            }   // end if
        }   // end for - cols
    }   // end for - rows

    /*
    for ( int i = 0; i < _depthLevels; ++i)
        cerr << "Bin " << i << ": " << bins[i] << endl;
    cerr << "Top bin = " << topIdx << endl;
    */
    free(bins);

    const float meanDepth = means[topIdx];    // Mean of most common depth values
    free(means);    // no longer needed once the mean of the top bin has been read

    // Calculate the std-dev for the most common depth interval
    const std::vector<float>& vds = dvals[topIdx];
    const int sz = (int)vds.size();
    double sumSqDiffs = 0;
    for ( int i = 0; i < sz; ++i)
        sumSqDiffs += pow(vds[i]-meanDepth,2);
    const double stddev = sqrt(sumSqDiffs/sz);
    _lastStdDev = float(stddev);

    // Identify the pixels closest to meanDepth within c*stddev
    const float withinRng = _inlierFactor * _lastStdDev;

    float minDepth = FLT_MAX;
    float maxDepth = 0;
    float totDepth = 0;
    int pxCount = 0;

    cv::Mat_<byte> outMat( rows, cols);
    for ( int i = 0; i < rows; ++i)
    {
        byte* outRow = outMat.ptr<byte>(i);
        const float* inRow = subMat.ptr<float>(i);
        for ( int j = 0; j < cols; ++j)
        {
            const float depth = inRow[j];
            if ( depth > 0 && (fabs(depth - meanDepth) < withinRng))
            {
                outRow[j] = 0xff;
                totDepth += depth;
                pxCount++;
                if ( depth < minDepth)
                    minDepth = depth;
                if ( depth > maxDepth)
                    maxDepth = depth;
            }   // end if
            else
                outRow[j] = 0;
        }   // end for
    }   // end for

    _lastMinDepth = minDepth;
    _lastMaxDepth = maxDepth;
    _lastAvgDepth = totDepth/pxCount;

    //cv::fastFree(dvals);
    return outMat;
}   // end calcMostCommonDepthMask
Example #19
void displacement(const cv::Rect& initial, const cv::Rect& last, cv::Point& centerDiff, double& areaRatio) {
    cv::Point centerInitial = center(initial);
    cv::Point centerLast = center(last);
    centerDiff = centerInitial - centerLast;
    areaRatio = ((double)(initial.area())) / last.area();
}
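center() is not shown above; a sketch of the obvious definition plus a usage line. The helper and the rectangle values are illustrative assumptions, not the original project's code.
// Assumed helper; the original center() is not shown.
cv::Point center(const cv::Rect& r)
{
    return cv::Point(r.x + r.width / 2, r.y + r.height / 2);
}

// Usage (rectangles are illustrative):
cv::Point diff;
double ratio = 0.0;
displacement(cv::Rect(0, 0, 100, 100), cv::Rect(10, 10, 80, 80), diff, ratio);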
Example #20
double GridFitter::evaluateCandidate(PipelineGrid& grid, cv::Mat const& roi, cv::Mat const& binarizedROI, cv::Mat const& sobelXRoi, const cv::Mat& sobelYRoi, const settings_cache_t &settings)
{
	double error = 0;

	const cv::Rect boundingBox = grid.getBoundingBox();
	// return max error if either width or height is zero
	if (!boundingBox.area()) return std::numeric_limits<double>::max();
	// also return max error if grid bounding box is not within roi
	if (boundingBox.x < 0 || boundingBox.y < 0) return std::numeric_limits<double>::max();
	if (boundingBox.x + boundingBox.width >= roi.cols ||
	    boundingBox.y + boundingBox.height >= roi.rows) return std::numeric_limits<double>::max();

	enum ROI {
		BINARY = 0,
		GRAYSCALE
	};

	static const ROI roiKind = ROI::GRAYSCALE;
	static const double numErrorMeasurements = 6.;

	const cv::Mat& selectedRoi = roiKind == ROI::BINARY ? binarizedROI : roi;

	{
		error_counter_t<expected_white_error_fun_t> errorFun(selectedRoi);
		errorFun = grid.processInnerWhiteRingCoordinates(std::move(errorFun));
		error += errorFun.getNormalizedError() * settings.err_func_alpha_inner;
	}

	{
		error_counter_t<expected_black_error_fun_t> errorFun(selectedRoi);
		errorFun = grid.processInnerBlackRingCoordinates(std::move(errorFun));
		error += errorFun.getNormalizedError() * settings.err_func_alpha_inner;
	}

	for (size_t cellIdx = 0; cellIdx < Grid::NUM_MIDDLE_CELLS; ++cellIdx) {
		variance_online_calculator_t errorFun(selectedRoi);
		errorFun = grid.processGridCellCoordinates(cellIdx, std::move(errorFun));
		error += (errorFun.getNormalizedVariance() / Grid::NUM_MIDDLE_CELLS) * settings.err_func_alpha_variance;
	}

	{
		error_counter_t<expected_white_error_fun_t> errorFun(selectedRoi);
		errorFun = grid.processOuterRingCoordinates(std::move(errorFun));
		error += errorFun.getNormalizedError() * settings.err_func_alpha_outer;
	}

	{
        sobel_error_counter_t errorFun(sobelXRoi, sobelYRoi, settings.sobel_threshold);
		errorFun = grid.processOuterRingEdgeCoordinates(std::move(errorFun));
		error += errorFun.getNormalizedError() * settings.err_func_alpha_outer_edge;
	}

	{
        sobel_error_counter_t errorFun(sobelXRoi, sobelYRoi, settings.sobel_threshold);
		errorFun = grid.processInnerLineCoordinates(std::move(errorFun));
		error += errorFun.getNormalizedError() * settings.err_func_alpha_inner_edge;
	}

	error /= numErrorMeasurements;

	return error;
}
bool testThreshold( const cv::Mat_<double>& ii, const cv::Rect& rct, double thresh)
{
    return ( RFeatures::getIntegralImageSum<double>( ii, rct) / rct.area()) >= thresh;
}   // end testThreshold
Mat PicOpt::Optimize9Patch::ResizeImageRect(const Mat &img,
	const cv::Rect &rc,
	bool is_hrz,
	Vec2i &new_patch)
{
	if (rc.area() <= 0)
	{
		return img;
	}

	auto size = img.size();
	if (is_hrz)
	{
		cv::Rect left(Point(), size);
		cv::Rect center = left;
		cv::Rect right = left;

		int width = (std::min)(rc.width, center_width_);
		left.width = rc.x;
		center.x = rc.x + (rc.width - width) / 2;
		center.width = width;
		right.x = rc.x + rc.width;            // note the interval is half-open: [x, x + width)
		right.width = size.width - right.x;

		Mat out(size.height, left.width + center.width + right.width, img.type());
		cv::Rect out_center = center;
		out_center.x = left.width;
		cv::Rect out_right = right;
		out_right.x = left.width + center.width;
		CopyImageRect(img, left, out, left);
		CopyImageRect(img, center, out, out_center);
		CopyImageRect(img, right, out, out_right);
		new_patch[0] = left.width;
		new_patch[1] = right.width;
		return out;
	}
	else
	{
		cv::Rect top(Point(), size);
		cv::Rect center = top;
		cv::Rect bottom = top;

		int height = (std::min)(rc.height, center_width_);
		top.height = rc.y;
		center.y = rc.y + (rc.height - height) / 2;
		center.height = height;
		bottom.y = rc.y + rc.height;
		bottom.height = size.height - bottom.y;

		Mat out(top.height + center.height + bottom.height, size.width, img.type());
		cv::Rect out_center = center;
		out_center.y = top.height;
		cv::Rect out_bottom = bottom;
		out_bottom.y = top.height + center.height;
		CopyImageRect(img, top, out, top);
		CopyImageRect(img, center, out, out_center);
		CopyImageRect(img, bottom, out, out_bottom);
		new_patch[0] = top.height;
		new_patch[1] = bottom.height;
		return out;
	}
}
void TrackAssociation::SuperviseTrainingForSubTracks()
{
	std::list<ObjSubTrack*> valid_track;
	for (size_t i = 0; i < subTrackVector.size(); ++i)
	{
		ObjSubTrack *subTrack = subTrackVector[i];
		if (subTrack->appModel.valid)
		{
			valid_track.push_back(subTrack);
		}
	}

	for (size_t i = 0; i<subTrackVector.size(); ++i)
	{
		ObjSubTrack *subTrack = subTrackVector[i];

		if (subTrack->appModel.valid)
		{
			const DetectionModel &model1 = subTrack->detectionSubTrack.back();
			const cv::Rect r1 = model1.ConvertToRect();

			Rect r1_rect;
			r1_rect = r1;
			Rect negative_region_rect = subTrack->appModel.tracker.getTrackingROI(r1_rect,7);
			cv::Rect negative_region = negative_region_rect.ConvertToRect();

			bool notTrain = false;
			std::list<ObjSubTrack*> negative_list; //other objTracks should be discriminated

			if (subTrack->appModel.tracker.tracking_lost)
			{
				notTrain = true;
// 				for (std::list<ObjSubTrack *>::iterator iter = valid_track.begin(); iter!=valid_track.end(); ++iter)
// 				{//push all other than itself
// 					if ((*iter)!=subTrack)
// 					{
// 						negative_list.push_back(*iter);
// 					}
// 				}
			}

			//collect neg tracks
			for (std::list<ObjSubTrack *>::iterator iter = valid_track.begin(); iter!=valid_track.end(); ++iter)
			{
				if ((*iter) != subTrack)
				{
					const DetectionModel &model2 = (*iter)->detectionSubTrack.back();
					const cv::Rect r2 = model2.ConvertToRect();

					cv::Rect intersect = r1 & r2;
					float overlap = intersect.area()/float(r1.area());
						
					if (overlap > 0.1f)
					{
						notTrain = true;
					}

					if ((*iter)->appModel.tracker.tracking_lost)
					{
						negative_list.push_back(*iter);
					}else
					{
						if ((r2 & negative_region).area()>0)
						{
							negative_list.push_back(*iter);
						}
					}
				}
			}

			//positive samples
			if (!notTrain)
			{
				if (subTrack->detectionSubTrack.back().external_detection)
				{
					subTrack->SupervisedTraining(frame_pool);
				}else
				{
					subTrack->SemiSupervisedTraining(frame_pool);
				}
			}

			subTrack->AddToNegList(negative_list);//Add to negative set (construct Nearest Neighbor Classifier)
		}
	}
}
Example #24
bool vpSortLargestFace(cv::Rect rect1, cv::Rect rect2)
{
  return (rect1.area() > rect2.area());
}
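This comparator sorts in descending area order, so the largest face ends up first; a typical call (requires <algorithm>):
// Typical usage: largest detected face first.
std::vector<cv::Rect> faces;   // filled by a face detector elsewhere
std::sort(faces.begin(), faces.end(), vpSortLargestFace);
// faces.front() is now the largest rectangle, if any were detected.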
Example #25
bool MotionDetector::evaluateStaticObjRect(cv::Rect& tmp_rect)
{

	int tmp_area = tmp_rect.area();
	bool isChild = false;
	bool isParent = false;
	bool result = false;

	if (tmp_area > m_MIN_BLOBSIZE) // reject rects that are too small
	{
		if (!isInMotion(tmp_rect, 0.95)) // reject rects that are in motion
		{

			if (!(m_staticObjectCandidates.size() > m_MAX_STATIC_OBJ)) // reject rects after max count
			{
				AmbiguousCandidate tmp;
				tmp.boundingBox = tmp_rect;
				tmp.counter = 0;
				tmp.accMovement = 0;
				tmp.state = DETECTOR_UNCLASSIFIED_OBJECT;

				std::set<int> deleteIdx;
				int iteration = 0;


				for (int k = 0; k < m_staticObjectCandidates.size(); ++k)
				{
					if (containsBoundingBox(m_staticObjectCandidates[k].boundingBox, tmp_rect)) // is within
					{
						isChild = true;
						result = false;
						return result;
					}

					if (containsBoundingBox(tmp_rect, m_staticObjectCandidates[k].boundingBox)) // contains existing bounding box
					{
						if (isParent)
						{
							deleteIdx.insert(k);
						}
						else
						{
							isParent = true;
							result = true;
							m_staticObjectCandidates[k].boundingBox = tmp_rect;
						}
					}
				}

				for (auto it = deleteIdx.begin(); it != deleteIdx.end(); ++it)
				{
					m_staticObjectCandidates.erase(m_staticObjectCandidates.begin() + *it - iteration);
					++iteration;
				}

				if (!isParent && !isChild)
				{
					m_staticObjectCandidates.push_back(tmp);
					result = true;
				}
			}

		}

	}
	return result;

}