Example #1
0
// Generates jittered positive examples around the 10 scan-grid rectangles
// closest to the current result box: one noisy patch per jitter for the NN
// model and one blurred patch for the ensemble classifier.
// Returns 0 unconditionally (the return value carries no information here).
int TrackerTLDImpl::Pexpert::additionalExamples(std::vector<Mat_<uchar> >& examplesForModel, std::vector<Mat_<uchar> >& examplesForEnsemble)
{
    examplesForModel.clear();
    examplesForEnsemble.clear();
    examplesForModel.reserve(100);
    examplesForEnsemble.reserve(100);

    std::vector<Rect2d> closest, scanGrid;
    Mat scaledImg, blurredImg;

    // Rescale/blur the frame so the current result box maps back to the
    // initial bounding-box size; `scale` converts frame coords -> scaled coords.
    double scale = scaleAndBlur(img_, cvRound(log(1.0 * resultBox_.width / (initSize_.width)) / log(SCALE_STEP)),
                                scaledImg, blurredImg, GaussBlurKernelSize, SCALE_STEP);
    TLDDetector::generateScanGrid(img_.rows, img_.cols, initSize_, scanGrid);
    getClosestN(scanGrid, Rect2d(resultBox_.x / scale, resultBox_.y / scale, resultBox_.width / scale, resultBox_.height / scale), 10, closest);

    for( int i = 0; i < (int)closest.size(); i++ )
    {
        for( int j = 0; j < 10; j++ )
        {
            Point2f center;
            Size2f size;
            Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE, STANDARD_PATCH_SIZE), blurredPatch(initSize_);
            // Random jitter: +/-1% translation, +/-1% scale, +/-5 degrees rotation.
            center.x = (float)(closest[i].x + closest[i].width * (0.5 + rng.uniform(-0.01, 0.01)));
            center.y = (float)(closest[i].y + closest[i].height * (0.5 + rng.uniform(-0.01, 0.01)));
            size.width = (float)(closest[i].width * rng.uniform((double)0.99, (double)1.01));
            size.height = (float)(closest[i].height * rng.uniform((double)0.99, (double)1.01));
            float angle = (float)rng.uniform(-5.0, 5.0);

            // BUGFIX: sample the patch from the image BEFORE adding noise.
            // Previously the noise loop wrote into an uninitialized patch and
            // this resample then overwrote it, so no noise ever reached the
            // model (and the BLUR_AS_VADIM path blurred an undefined patch).
            resample(scaledImg, RotatedRect(center, size, angle), standardPatch);

            // Add per-pixel Gaussian noise (sigma = 5).  The patch is square,
            // so indexing with (x, y) still visits every element exactly once.
            for( int y = 0; y < standardPatch.rows; y++ )
            {
                for( int x = 0; x < standardPatch.cols; x++ )
                {
                    standardPatch(x, y) += (uchar)rng.gaussian(5.0);
                }
            }
#ifdef BLUR_AS_VADIM
            GaussianBlur(standardPatch, blurredPatch, GaussBlurKernelSize, 0.0);
            resize(blurredPatch, blurredPatch, initSize_);
#else
            resample(blurredImg, RotatedRect(center, size, angle), blurredPatch);
#endif
            examplesForModel.push_back(standardPatch);
            examplesForEnsemble.push_back(blurredPatch);
        }
    }
    return 0;
}
Example #2
0
// Builds a RotatedRect from a configuration vector, scaled uniformly.
// conf[0..1] = center (x, y), conf[4] = size factor, conf[6] = angle (degrees).
RotatedRect doRect(double *conf,const double scale, const RotatedRect rect){
	
	const double cx = conf[0] * scale;
	const double cy = conf[1] * scale;
	const double w  = rect.size.width  * conf[4] * scale;
	const double h  = rect.size.height * conf[4] * scale;
	
	return RotatedRect(Point2f((float)cx, (float)cy), Size2f((float)w, (float)h), (float)conf[6]);
}
Example #3
0
File: bleb.cpp  Project: nauhc/CellGUI
// Default constructor: resets every field to an "empty" bleb.
Bleb::Bleb()
{
    bin = 0;
    size = 0;
    center = Point(0, 0);
    roughArea = RotatedRect();
    // No polar samples collected yet.
    bunch_polar.clear();
}
Example #4
0
// Thin C++ wrapper over the legacy C cvCamShift implementation.
// Updates `window` with the converged search window and returns the
// oriented bounding box of the tracked distribution.
RotatedRect CamShift( const Mat& probImage, Rect& window,
                      TermCriteria criteria )
{
    CvConnectedComp component;
    CvBox2D orientedBox;
    CvMat c_prob = probImage;  // non-owning C view of the probability image
    cvCamShift(&c_prob, window, (CvTermCriteria)criteria, &component, &orientedBox);
    window = component.rect;
    return RotatedRect(Point2f(orientedBox.center), Size2f(orientedBox.size), orientedBox.angle);
}
  // Computes the synthesis (IK oracle) error for one example: the pose
  // distance to the closest training exemplar, plus a template-correlation
  // error between the test and oracle image crops.
  IKSyntherError do_synther_error(MetaData&metadata)
  {
    IKSyntherError ikerr;

    // find the training example with closest pose to the testing example
    double min_dist;
    shared_ptr<MetaData> oracle_datum = do_closest_exemplar_ik(metadata,min_dist);

    // calc the pose error
    ikerr.pose_error = min_dist;

    // get the bbs
    vector<AnnotationBoundingBox> test_bbs  = metric_positive(metadata);    
    vector<AnnotationBoundingBox> train_bbs = metric_positive(*oracle_datum);
    if(test_bbs.size() > 0 and train_bbs.size() > 0)
    {
      // calc the template error.    
      // Crop each loaded image to its first annotation box and build a
      // volumetric template (no mask, default RotatedRect).
      AnnotationBoundingBox test_bb = test_bbs.front();
      ImRGBZ test_im = (*metadata.load_im())(test_bb);
      VolumetricTemplate test_templ(test_im,test_bb.depth,nullptr,RotatedRect());
      //
      AnnotationBoundingBox train_bb = train_bbs.front();
      ImRGBZ train_im = (*oracle_datum->load_im())(train_bb);
      VolumetricTemplate train_templ(train_im,train_bb.depth,nullptr,RotatedRect());
      //
      // cor() == 1 means identical templates, so the error is 1 - correlation.
      ikerr.template_error = 1 - test_templ.cor(train_templ);

      // Periodically log a side-by-side visualization of the two templates.
      log_im_decay_freq("do_synther_error",[&]()
			{
			  Mat vis_test = imageeq("",test_templ.getTIm(),false,false);
			  Mat vis_train = imageeq("",train_templ.getTIm(),false,false);
			  
			  return horizCat(vis_test,vis_train);
			});
    }
    else
      // No usable bounding boxes on one side: assign the maximum error.
      ikerr.template_error = 1.0;

    return ikerr;
  }
Example #6
0
  /*
   * Draws the "robot" as a triangle facing in the
   * direction it's initialised as.
   */
void RobotSim::draw(Mat& src, bool draw_searchRadius)
{
  // Oriented body rectangle at the robot's position and heading.
  cv::RotatedRect body = RotatedRect(Position, R_size, angle);
  Point2f corner[4];
  body.points(corner);
  // Triangle apex = midpoint of the rectangle's front edge (corners 2 and 3).
  Point2f apex((corner[2].x + corner[3].x) / 2, (corner[2].y + corner[3].y) / 2);
  const Scalar blue(255, 0, 0);
  cv::line(src, corner[0], apex, blue);
  cv::line(src, corner[1], apex, blue);
  cv::line(src, corner[0], corner[1], blue);
  if (draw_searchRadius)
  {
    // Optional sensing-range indicator around the robot.
    circle(src, Position, searchRadius, Scalar(0, 0, 255), 2, 8, 0);
  }
}
// Tracks the object template inside the scene layer using OpenCV's CamShift.
// Recomputes the object hue histogram whenever the object image changes,
// back-projects it over the scene, optionally draws the probability mask,
// and stores the rotated track-box corners in border_x1..border_y4
// (scene-layer coordinates).
void FindObjectMain::process_camshift()
{
// Some user defined parameters
	int vmin = config.vmin;
	int vmax = config.vmax;
	int smin = config.smin;
	float hranges[] = { 0, 180 };
	const float* phranges = hranges;


// Create aligned, RGB images
	if(!object_image)
	{
		object_image = cvCreateImage( 
			cvSize(object_image_w, object_image_h), 
			8, 
			3);
	}

	if(!scene_image)
	{
		scene_image = cvCreateImage( 
			cvSize(scene_image_w, scene_image_h), 
			8, 
			3);
	}

// Temporary row pointers
	unsigned char **object_rows = new unsigned char*[object_image_h];
	unsigned char **scene_rows = new unsigned char*[scene_image_h];
	for(int i = 0; i < object_image_h; i++)
	{
		object_rows[i] = (unsigned char*)(object_image->imageData + i * object_image_w * 3);
	}
	for(int i = 0; i < scene_image_h; i++)
	{
		scene_rows[i] = (unsigned char*)(scene_image->imageData + i * scene_image_w * 3);
	}

// Transfer object & scene to RGB images for OpenCV
	if(!prev_object) prev_object = new unsigned char[object_image_w * object_image_h * 3];
// Back up old object image (taken before the transfer below overwrites it,
// so it can be compared against the new object image)
	memcpy(prev_object, object_image->imageData, object_image_w * object_image_h * 3);

// Convert the object layer's region into the 8-bit RGB object_image
	BC_CModels::transfer(object_rows,
		get_input(object_layer)->get_rows(),
		0,
		0,
		0,
		0,
		0,
		0,
		object_x1,
		object_y1,
		object_w,
		object_h,
		0,
		0,
		object_w,
		object_h,
		get_input(object_layer)->get_color_model(),
		BC_RGB888,
		0,
		0,
		0);
// Convert the scene layer's region into the 8-bit RGB scene_image
	BC_CModels::transfer(scene_rows,
		get_input(scene_layer)->get_rows(),
		0,
		0,
		0,
		0,
		0,
		0,
		scene_x1,
		scene_y1,
		scene_w,
		scene_h,
		0,
		0,
		scene_w,
		scene_h,
		get_input(scene_layer)->get_color_model(),
		BC_RGB888,
		0,
		0,
		0);

	delete [] object_rows;
	delete [] scene_rows;

// from camshiftdemo.cpp
// Compute new object	
// Recompute the hue histogram only if the object image changed since the
// previous call, or if no histogram exists yet (!hist.dims covers the
// first call, where prev_object holds uninitialized data).
	if(memcmp(prev_object, 
		object_image->imageData, 
		object_image_w * object_image_h * 3) ||
		!hist.dims)
	{
		Mat image(object_image);
		Mat hsv, hue, mask;
		cvtColor(image, hsv, CV_RGB2HSV);
    	int _vmin = vmin, _vmax = vmax;
//printf("FindObjectMain::process_camshift %d\n", __LINE__);

// Mask out pixels outside the configured saturation/value ranges
    	inRange(hsv, 
			Scalar(0, smin, MIN(_vmin,_vmax)),
        	Scalar(180, 256, MAX(_vmin, _vmax)), 
			mask);
// Extract channel 0 (hue) into its own single-channel image
    	int ch[] = { 0, 0 };
    	hue.create(hsv.size(), hsv.depth());
    	mixChannels(&hsv, 1, &hue, 1, ch, 1);

		Rect selection = Rect(0, 0, object_w, object_h);
		trackWindow = selection;
		int hsize = 16;
		Mat roi(hue, selection), maskroi(mask, selection);
		calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
		normalize(hist, hist, 0, 255, CV_MINMAX);
	}


// compute scene
	Mat image(scene_image);
	Mat hsv, hue, mask, backproj;
	cvtColor(image, hsv, CV_RGB2HSV);
    int _vmin = vmin, _vmax = vmax;

    inRange(hsv, 
		Scalar(0, smin, MIN(_vmin,_vmax)),
        Scalar(180, 256, MAX(_vmin, _vmax)), 
		mask);
    int ch[] = {0, 0};
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);
	
//printf("FindObjectMain::process_camshift %d %d %d\n", __LINE__, hist.dims, hist.size[1]);
// Default track box (used when no histogram is available): centered on the
// object selection with zero rotation.
	RotatedRect trackBox = RotatedRect(
		Point2f((object_x1 + object_x2) / 2, (object_y1 + object_y2) / 2), 
		Size2f(object_w, object_h), 
		0);
	trackWindow = Rect(0, 
		0,
        scene_w, 
		scene_h);
	if(hist.dims > 0)
	{
		

// Back-project the object histogram over the scene's hue channel and
// run CamShift to locate the object.
		calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
		backproj &= mask;
//printf("FindObjectMain::process_camshift %d\n", __LINE__);
// 		if(trackWindow.width <= 0 ||
// 			trackWindow.height <= 0)
// 		{
// 			trackWindow.width = object_w;
// 			trackWindow.height = object_h;
// 		}

		trackBox = CamShift(backproj, 
			trackWindow,
        	TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
//printf("FindObjectMain::process_camshift %d\n", __LINE__);


//     	if( trackWindow.area() <= 1 )
//     	{
//         	int cols = backproj.cols;
// 			int rows = backproj.rows;
// 			int r = (MIN(cols, rows) + 5) / 6;
//         	trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
//                         	   trackWindow.x + r, trackWindow.y + r) &
//                     	  Rect(0, 0, cols, rows);
//     	}
	}
// printf("FindObjectMain::process_camshift %d %d %d %d %d\n", 
// __LINE__,
// trackWindow.x,
// trackWindow.y,
// trackWindow.width,
// trackWindow.height);


// Draw mask over scene
// Copies the back-projection probability into the luma channel of the
// scene layer (only implemented for YUV888 frames).
	if(config.draw_keypoints)
	{
		for(int i = 0; i < scene_h; i++)
		{
			switch(get_input(scene_layer)->get_color_model())
			{
				case BC_YUV888:
				{
					unsigned char *input = backproj.data + i * scene_image_w;
					unsigned char *output = get_input(scene_layer)->get_rows()[i + scene_y1] + scene_x1 * 3;
					for(int j = 0; j < scene_w; j++)
					{
						output[0] = *input;
						output[1] = 0x80;
						output[2] = 0x80;
						output += 3;
						input++;
					}
					break;
				}
			}
		}
	}

// Get object outline in the scene layer
// printf("FindObjectMain::process_camshift %d %d %d %d %d %d\n", 
// __LINE__,
// (int)trackBox.center.x,
// (int)trackBox.center.y,
// (int)trackBox.size.width,
// (int)trackBox.size.height,
// (int)trackBox.angle);
// Compute the four corners of the rotated track box by rotating each
// corner's polar angle by the box angle around the box center.
	double angle = trackBox.angle * 2 * M_PI / 360;
	double angle1 = atan2(-(double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle;
	double angle2 = atan2(-(double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle;
	double angle3 = atan2((double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle;
	double angle4 = atan2((double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle;
	double radius = sqrt(SQR(trackBox.size.height / 2) + SQR(trackBox.size.width / 2));
	border_x1 = (int)(trackBox.center.x + cos(angle1) * radius) + scene_x1;
	border_y1 = (int)(trackBox.center.y + sin(angle1) * radius) + scene_y1;
	border_x2 = (int)(trackBox.center.x + cos(angle2) * radius) + scene_x1;
	border_y2 = (int)(trackBox.center.y + sin(angle2) * radius) + scene_y1;
	border_x3 = (int)(trackBox.center.x + cos(angle3) * radius) + scene_x1;
	border_y3 = (int)(trackBox.center.y + sin(angle3) * radius) + scene_y1;
	border_x4 = (int)(trackBox.center.x + cos(ang4 := 0, angle4) * radius) + scene_x1;
	border_y4 = (int)(trackBox.center.y + sin(angle4) * radius) + scene_y1;

}
	// Processes one grayscale frame of the CMT pipeline:
	//   track -> describe -> global match -> fuse -> scale/rotation estimate ->
	//   consensus -> local match -> fuse -> finalize bounding box.
	// Returns TRUE while the object is considered visible; any stage failure
	// sets isRetrying and returns FALSE immediately.
	BOOL CMT::processFrame(const Mat& imGray)
	{
		//------------------------------------------------------------------------------------------
		// Track keypoints
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsTracked;
		vector<UCHAR> status;

		this->isVisible = tracker.track(
			imPrev,
			imGray,
			pointsActive,
			pointsTracked,
			status
			);

		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}
				

		//------------------------------------------------------------------------------------------
		// Keep successful classes
		//------------------------------------------------------------------------------------------
		vector<int> classesTracked;
		for (UINT it = 0; it < classesActive.size(); ++it)
		{
			if (status[it]) classesTracked.push_back(classesActive[it]);
			// else if(status[it] == 0) classesActive[it] : failed to track
		}

		//------------------------------------------------------------------------------------------
		// Compute Descriptors
		// this->keypoints : Supplied by thread
		//------------------------------------------------------------------------------------------				
		MatND descriptors;
		descriptor->compute(imGray, this->keypoints, descriptors);


		//------------------------------------------------------------------------------------------
		// Match keypoints globally
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsMatchedGlobal;
		vector<int> classesMatchedGlobal;

		this->isVisible = matcher.matchGlobal(
			this->keypoints,
			descriptors,
			pointsMatchedGlobal,
			classesMatchedGlobal
			);
		
		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}

		//------------------------------------------------------------------------------------------
		// pointsFused = { pointsTracked -> pointsMatchedGlobal }
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsFused;
		vector<int> classesFused;

		// When recovering from a lost frame the tracked points are stale;
		// rely purely on the global matches.
		if (this->isRetrying)
		{
			pointsTracked.clear();
			classesTracked.clear();
		}


		//-----------------------------------
		// Matches First
		//-----------------------------------
		this->isVisible = fusion.preferFirst(
			pointsMatchedGlobal,
			classesMatchedGlobal,
			pointsTracked,
			classesTracked,
			pointsFused,
			classesFused
			);

		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}

		//------------------------------------------------------------------------------------------
		// Estimate the changes in Scale and Rotation of pointsFused from the initial Constellation
		//------------------------------------------------------------------------------------------
		float scale, rotation;
		
		this->isVisible = consensus.estimateScaleRotation(
			pointsFused,
			classesFused,
			scale,
			rotation
			);

		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}
		

		//------------------------------------------------------------------------------------------
		// Find inliers and the center of their votes
		//------------------------------------------------------------------------------------------
		// votes : pointsFused - DeformsFG
		// votes = pointsFused - scale * rotate(vectorsFG[classes[it]], rotation);
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsInlier;
		vector<int> classesInlier;
				
		this->isVisible = consensus.findConsensus(
			pointsFused,
			classesFused,
			scale,
			rotation,
			center,
			pointsInlier,
			classesInlier
			);

		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}

		
		//------------------------------------------------------------------------------------------
		// Match keypoints locally
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsMatchedLocal;
		vector<int> classesMatchedLocal;

		this->isVisible = matcher.matchLocal(
			this->keypoints,
			descriptors,
			scale,
			rotation,
			center,
			pointsMatchedLocal,
			classesMatchedLocal
			);
		
		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}


		//------------------------------------------------------------------------------------------
		// pointsActive = { pointsInlier -> pointsMatchedLocal }
		//------------------------------------------------------------------------------------------
		pointsActive.clear();
		classesActive.clear();

		this->isVisible = fusion.preferFirst(
			pointsMatchedLocal,
			classesMatchedLocal,
			pointsInlier,
			classesInlier,
			pointsActive,
			classesActive
			);
			
		if (!this->isVisible)
		{
			this->isRetrying = TRUE;
			return this->isVisible;
		}


		//------------------------------------------------------------------------------------------
		// Finalization
		//------------------------------------------------------------------------------------------
		bbRotated = RotatedRect(center, sizeInitial * scale, rotation / CV_PI * 180);
		imPrev = imGray;

		BOOL rangeValid = determineFailure(this->center);

		if (!rangeValid)
		{
			#ifdef DEBUG_PROGRAM
				// BUGFIX: `verticle` was declared in a commented-out block
				// above, which broke compilation whenever DEBUG_PROGRAM was
				// defined.  Compute the box corners locally instead.
				Point2f verticle[4];
				this->bbRotated.points(verticle);
				CString cERROR;
				cERROR.Format(_T("(%.2f, %.2f)(%.2f, %.2f)(%.2f, %.2f)(%.2f, %.2f)(%.2f, %.2f)"),
					this->center.x, this->center.y,
					verticle[0].x, verticle[0].y,
					verticle[1].x, verticle[1].y,
					verticle[2].x, verticle[2].y,
					verticle[3].x, verticle[3].y
					);
				AfxMessageBox(cERROR);
			#endif

			return rangeValid;
		}

		this->distJournal();
		this->isRetrying = FALSE;
		return this->isVisible;
	}
// Processes one grayscale frame: optical-flow tracks the active points
// (only while continuity holds), re-detects and matches keypoints, fuses
// the two point sets, estimates scale/rotation, and updates the rotated
// bounding box `bb_rot` and the continuity state.
void CMT::processFrame(Mat im_gray) {

    FILE_LOG(logDEBUG) << "CMT::processFrame() call";

    //Track keypoints
    vector<Point2f> points_tracked;
    vector<unsigned char> status;
    // Skip tracking after a continuity break: points_active is then stale.
    if(continuity_preserved) {
        tracker.track(im_prev, im_gray, points_active, points_tracked, status);
    }

    FILE_LOG(logDEBUG) << points_tracked.size() << " tracked points.";

    // keep only successful classes
    vector<int> classes_tracked;
    if (continuity_preserved) {

        for (size_t i = 0; i < classes_active.size(); i++) {
            if (status[i]) {
                classes_tracked.push_back(classes_active[i]);
            }

        }
    }

    //Detect keypoints, compute descriptors
    vector<KeyPoint> keypoints;
    detector->detect(im_gray, keypoints);

    FILE_LOG(logDEBUG) << keypoints.size() << " keypoints found.";

    Mat descriptors;
    descriptor->compute(im_gray, keypoints, descriptors);

    //Match keypoints globally
    vector<Point2f> points_matched_global;
    vector<int> classes_matched_global;
    matcher.matchGlobal(keypoints, descriptors, points_matched_global, classes_matched_global);

    FILE_LOG(logDEBUG) << points_matched_global.size() << " points matched globally.";

    //Fuse tracked and globally matched points (tracked points win conflicts)
    vector<Point2f> points_fused;
    vector<int> classes_fused;
    fusion.preferFirst(points_tracked, classes_tracked, points_matched_global, classes_matched_global,
            points_fused, classes_fused);

    FILE_LOG(logDEBUG) << points_fused.size() << " points fused.";

    //Estimate scale and rotation from the fused points
    float scale;
    float rotation;
    consensus.estimateScaleRotation(points_fused, classes_fused, scale, rotation);

    FILE_LOG(logDEBUG) << "scale " << scale << ", " << "rotation " << rotation;

    //Find inliers and the center of their votes
    Point2f center;
    vector<Point2f> points_inlier;
    vector<int> classes_inlier;
    consensus.findConsensus(points_fused, classes_fused, scale, rotation,
            center, points_inlier, classes_inlier);

    FILE_LOG(logDEBUG) << points_inlier.size() << " inlier points.";
    FILE_LOG(logDEBUG) << "center " << center;

    //Match keypoints locally
    vector<Point2f> points_matched_local;
    vector<int> classes_matched_local;
    matcher.matchLocal(keypoints, descriptors, center, scale, rotation, points_matched_local, classes_matched_local);

    FILE_LOG(logDEBUG) << points_matched_local.size() << " points matched locally.";

    //Clear active points
    points_active.clear();
    classes_active.clear();

    //Fuse locally matched points and inliers (local matches win conflicts)
    fusion.preferFirst(points_matched_local, classes_matched_local, points_inlier, classes_inlier, points_active, classes_active);
//    points_active = points_fused;
//    classes_active = classes_fused;

    FILE_LOG(logDEBUG) << points_active.size() << " final fused points.";

    //TODO: Use theta to suppress result
    bb_rot = RotatedRect(center,  size_initial * scale, rotation/CV_PI * 180);

    continuity_preserved = continuity.check_for_continuity(points_active, bb_rot);

    if(continuity_preserved){
        std::cout << "continuity preserved!" << std::endl;
    }else{
        std::cout << "continuity broken!" << std::endl;
    }

    //Remember current image
    im_prev = im_gray;

    FILE_LOG(logDEBUG) << "CMT::processFrame() return";
}
// Initializes the tracker on the first frame: remembers the selection,
// detects keypoints over the whole image, splits them into foreground
// (inside `rect`) and background, computes descriptors, and seeds the
// matcher, consensus and continuity modules.
void CMT::initialize(const Mat im_gray, const Rect rect)
{
    FILE_LOG(logDEBUG) << "CMT::initialize() call";

    //Remember initial size
    size_initial = rect.size();

    //Remember initial image
    im_prev = im_gray;

    //Compute center of rect
    Point2f center = Point2f(rect.x + rect.width/2.0, rect.y + rect.height/2.0);

    //Initialize rotated bounding box
    bb_rot = RotatedRect(center, size_initial, 0.0);

    //Initialize detector and descriptor
#if CV_MAJOR_VERSION > 2
    detector = cv::FastFeatureDetector::create();
    descriptor = cv::BRISK::create();
#else
    detector = FeatureDetector::create(str_detector);
    descriptor = DescriptorExtractor::create(str_descriptor);
#endif

    //Get initial keypoints in whole image and compute their descriptors
    vector<KeyPoint> keypoints;
    detector->detect(im_gray, keypoints);

    //Divide keypoints into foreground and background keypoints according to selection
    vector<KeyPoint> keypoints_fg;
    vector<KeyPoint> keypoints_bg;

    for (size_t i = 0; i < keypoints.size(); i++)
    {
        KeyPoint k = keypoints[i];
        Point2f pt = k.pt;

        if (pt.x > rect.x && pt.y > rect.y && pt.x < rect.br().x && pt.y < rect.br().y)
        {
            keypoints_fg.push_back(k);
        }
        else
        {
            keypoints_bg.push_back(k);
        }
    }

    //Create foreground classes (one label per foreground keypoint)
    vector<int> classes_fg;
    classes_fg.reserve(keypoints_fg.size());
    for (size_t i = 0; i < keypoints_fg.size(); i++)
    {
        classes_fg.push_back(i);
    }

    //Compute foreground/background features
    Mat descs_fg;
    Mat descs_bg;
    descriptor->compute(im_gray, keypoints_fg, descs_fg);
    descriptor->compute(im_gray, keypoints_bg, descs_bg);

    //Only now is the right time to convert keypoints to points, as compute() might remove some keypoints
    vector<Point2f> points_fg;
    vector<Point2f> points_bg;

    for (size_t i = 0; i < keypoints_fg.size(); i++)
    {
        points_fg.push_back(keypoints_fg[i].pt);
    }

    FILE_LOG(logDEBUG) << points_fg.size() << " foreground points.";

    for (size_t i = 0; i < keypoints_bg.size(); i++)
    {
        points_bg.push_back(keypoints_bg[i].pt);
    }

    //Create normalized points (foreground points relative to the center)
    vector<Point2f> points_normalized;
    for (size_t i = 0; i < points_fg.size(); i++)
    {
        points_normalized.push_back(points_fg[i] - center);
    }

    //Initialize matcher
    matcher.initialize(points_normalized, descs_fg, classes_fg, descs_bg, center);

    //Initialize consensus
    consensus.initialize(points_normalized);

    //Create initial set of active keypoints.
    //BUGFIX: reset both vectors first so re-initialization does not append to
    //stale state, and assign the class labels exactly once — the original
    //re-assigned classes_active on every iteration of this loop.
    points_active.clear();
    classes_active.clear();
    points_active.reserve(keypoints_fg.size());
    for (size_t i = 0; i < keypoints_fg.size(); i++)
    {
        points_active.push_back(keypoints_fg[i].pt);
    }
    classes_active = classes_fg;

    //Initialize continuity
    continuity.initialize(points_active, bb_rot);

    FILE_LOG(logDEBUG) << "CMT::initialize() return";
}
Example #11
0
// Continuously Adaptive Mean Shift: runs meanShift to converge the search
// window on the probability image, then re-estimates the window size and
// the object orientation from the image moments inside an expanded window.
// Updates `window` in place and returns the oriented bounding box (an empty
// RotatedRect when the window contains no mass).
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
                              TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    const int TOLERANCE = 10;
    Size size;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), size = umat.size();
    else
        mat = _probImage.getMat(), size = mat.size();

    meanShift( _probImage, window, criteria );

    // Expand the converged window by TOLERANCE on each side, clamped to the
    // image bounds, before computing moments.
    window.x -= TOLERANCE;
    if( window.x < 0 )
        window.x = 0;

    window.y -= TOLERANCE;
    if( window.y < 0 )
        window.y = 0;

    window.width += 2 * TOLERANCE;
    if( window.x + window.width > size.width )
        window.width = size.width - window.x;

    window.height += 2 * TOLERANCE;
    if( window.y + window.height > size.height )
        window.height = size.height - window.y;

    // Calculating moments in new center mass
    Moments m = isUMat ? moments(umat(window)) : moments(mat(window));

    double m00 = m.m00, m10 = m.m10, m01 = m.m01;
    double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;

    // Zero total mass: nothing to track in this window.
    if( fabs(m00) < DBL_EPSILON )
        return RotatedRect();

    double inv_m00 = 1. / m00;
    int xc = cvRound( m10 * inv_m00 + window.x );
    int yc = cvRound( m01 * inv_m00 + window.y );
    double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;

    // Calculating width & height
    double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );

    // Calculating orientation
    double theta = atan2( 2 * b, a - c + square );

    // Calculating width & length of figure
    double cs = cos( theta );
    double sn = sin( theta );

    // Second moments along/across the principal axis give the extents.
    double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
    double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
    double length = std::sqrt( rotate_a * inv_m00 ) * 4;
    double width = std::sqrt( rotate_c * inv_m00 ) * 4;

    // In case, when theta is 0 or 1.57... the Length & Width may be exchanged
    if( length < width )
    {
        std::swap( length, width );
        std::swap( cs, sn );
        theta = CV_PI*0.5 - theta;
    }

    // Saving results
    int _xc = cvRound( xc );
    int _yc = cvRound( yc );

    int t0 = cvRound( fabs( length * cs ));
    int t1 = cvRound( fabs( width * sn ));

    t0 = MAX( t0, t1 ) + 2;
    window.width = MIN( t0, (size.width - _xc) * 2 );

    t0 = cvRound( fabs( length * sn ));
    t1 = cvRound( fabs( width * cs ));

    t0 = MAX( t0, t1 ) + 2;
    window.height = MIN( t0, (size.height - _yc) * 2 );

    window.x = MAX( 0, _xc - window.width / 2 );
    window.y = MAX( 0, _yc - window.height / 2 );

    window.width = MIN( size.width - window.x, window.width );
    window.height = MIN( size.height - window.y, window.height );

    RotatedRect box;
    box.size.height = (float)length;
    box.size.width = (float)width;
    // Normalize the reported angle into [0, 180).
    box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
    while(box.angle < 0)
        box.angle += 360;
    while(box.angle >= 360)
        box.angle -= 360;
    if(box.angle >= 180)
        box.angle -= 180;
    box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);

    return box;
}
Example #12
0
// Scales a RotatedRect's center and size by per-axis factors, preserving
// the angle.
// BUGFIX: the scaled dimensions previously went through integer cv::Size,
// silently truncating the fractional part; use cv::Size2f throughout.
RotatedRect scaleRect(const RotatedRect rect, const cv::Size2f scale){
	return RotatedRect(Point2f(rect.center.x*scale.width,rect.center.y*scale.height), 
					   cv::Size2f(rect.size.width*scale.width,rect.size.height*scale.height), 
					   rect.angle);
}
Example #13
0
// Reads the tracker's system parameters from the text file `fileParam`
// (one value per line: time range, initial rect, norm type, video path,
// RGB flag, model memory, alpha, search area, scale/angle vectors) and
// fills `param` accordingly.  Aborts via CV_Assert on an unreadable file
// or an unrecognized norm/RGB value.
void setSistemConfig(config_SystemParameter *param, string fileParam){
	
	//system param - Directory
	param->angleVector.clear();
	param->scaleVector.clear();
	
	//READING THE PATCH'S POSITION
	int length;
	char * buffer;
	
	ifstream is;
	is.open (fileParam.c_str(), ios::binary );
	// BUGFIX: fail fast on a missing/unreadable file.  Previously tellg()
	// returned -1 here and `new char[length]` was undefined behavior.
	CV_Assert(is.is_open());
	
	// get length of file:
	is.seekg (0, ios::end);
	length = is.tellg();
	is.seekg (0, ios::beg);
	CV_Assert(length >= 0);
	// allocate memory:
	buffer = new char [length];
	// read data as a block:
	is.read (buffer,length);
	
	string content(buffer,length);
	stringstream ss(content);
	string centerS,centerS_x,centerS_y,sizeS,sizeS_w,sizeS_h,angleS, 
	       norm,rgb,videoPath,
	       modelMemory,alpha,
	       areaSearch,angleVector,scaleVector,
		   time;
	
	// One configuration value per line, in this fixed order.
	getline(ss, time);
	getline(ss, centerS);
	getline(ss, sizeS);
	getline(ss, angleS);
	getline(ss, norm);
	getline(ss, videoPath);
	getline(ss, rgb);
	getline(ss, modelMemory);
	getline(ss, alpha);
	getline(ss, areaSearch);
	getline(ss, scaleVector);
	getline(ss, angleVector);
	
	//Time range ("start,end")
	param->startTimeOnFrames = isFrame(time.substr(0,time.find(',')));
	param->endTimeOnFrames = isFrame(time.substr(time.find(',')+1));
	
	param->startTime = time2msec(time.substr(0,time.find(',')));
	param->endTime = time2msec(time.substr(time.find(',')+1));
	
	//initialRect ("cx,cy" and "w,h")
	centerS_x = centerS.substr(0,centerS.find(','));
	centerS_y = centerS.substr(centerS.find(',')+1);
	sizeS_w = sizeS.substr(0,sizeS.find(','));
	sizeS_h = sizeS.substr(sizeS.find(',')+1);
	
	
	Point2f center(atof(centerS_x.c_str()),atof(centerS_y.c_str()));
	cv::Size size(atof(sizeS_w.c_str()),atof(sizeS_h.c_str()));
	
	param->initialRect = RotatedRect(center,size,atof(angleS.c_str()));
	
	//NormType
	if (!norm.compare("FACE")) param->normType = FACE;//*normType = FACE;
	else if(!norm.compare("PEDESTRIAN")) param->normType = PEDESTRIAN;//*normType = PEDESTRIAN;
	else {
		cout << "NOT SUCH NORM: "<<norm<<endl;
		CV_Assert(false);
	}
	
	//RGB: color adds 2 extra feature channels
	if (!rgb.compare("1")){ 
		param->isRGB = true;
		param->numFeature = 12;
	}
	else if(!rgb.compare("0")){
		param->isRGB = false;
		param->numFeature = 10;
	}
	else {
		cout << "INVALID RGB VALUE: "<<rgb<<endl;
		CV_Assert(false);
	}
	
	//video path
	param->videoPath = videoPath;
	
	//modelMemory
	param->modelMemory = atof(modelMemory.c_str());
	//alpha
	param->alpha = atof(alpha.c_str());
	//areaSearch
	param->areaSearch = atof(areaSearch.c_str());
	
	//scaleVector: comma-separated list of doubles
	int index = 0;
	string aux;
	while (index<scaleVector.length()) {
		aux = scaleVector.substr(index,scaleVector.length());
		aux = aux.substr(0,aux.find_first_of(','));
		index += aux.length()+1;
		param->scaleVector.push_back(atof(aux.c_str()));
	}
	//anglesVector: comma-separated list of doubles
	index = 0;
	while (index<angleVector.length()) {
		aux = angleVector.substr(index,angleVector.length());
		aux = aux.substr(0,aux.find_first_of(','));
		index += aux.length()+1;
		param->angleVector.push_back(atof(aux.c_str()));
	}
	is.close();
	delete[] buffer;
}