Example #1
/*  Trains the classifier on warps of a bounding-box patch.
    frame: frame to take warps from
    bb: first-frame bounding-box [x, y, width, height] */
void bbWarpPatch(IntegralImage *frame, double *bb) {
    // Transformation matrix
    float *m = new float[4];
    
    // Loop through various rotations and skews
    for (float r = -0.1f; r < 0.1f; r += 0.005f) {
        float sine = sin(r);
        float cosine = cos(r);
        
        for (float sx = -0.1f; sx < 0.1f; sx += 0.05f) {
            for (float sy = -0.1f; sy < 0.1f; sy += 0.05f) {
                // Set transformation
                /*  Rotation matrix * skew matrix =
                    
                    | cos r   sin r | * | 1   sx | = 
                    | -sin r  cos r |   | sy   1 |
                    
                    | cos r + sy * sin r   sx * cos r + sin r |
                    | sy * cos r - sin r   cos r - sx * sin r | */
                m[0] = cosine + sy * sine;
                m[1] = sx * cosine + sine;
                m[2] = sy * cosine - sine;
                m[3] = cosine - sx * sine;
                
                // Create warp and train classifier
                IntegralImage *warp = new IntegralImage();
                warp->createWarp(frame, bb, m);
                classifier->train(warp, 0, 0, (int)bb[2], (int)bb[3], 1);
                delete warp;
            }
        }
    }
    
    delete[] m;
}
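The closed-form entries assigned to m[] follow directly from the rotation-times-skew product in the comment. A small standalone check, not project code, with arbitrary sample values, that the closed form matches an explicit 2x2 multiply:

#include <cassert>
#include <cmath>

int main() {
    float r = 0.05f, sx = 0.05f, sy = -0.05f;
    // Row-major 2x2 matrices from the comment above
    float R[4] = { std::cos(r), std::sin(r), -std::sin(r), std::cos(r) };  // rotation
    float S[4] = { 1.0f, sx, sy, 1.0f };                                   // skew

    // Explicit product R * S
    float m0 = R[0] * S[0] + R[1] * S[2];
    float m1 = R[0] * S[1] + R[1] * S[3];
    float m2 = R[2] * S[0] + R[3] * S[2];
    float m3 = R[2] * S[1] + R[3] * S[3];

    // Closed form used by bbWarpPatch
    assert(std::fabs(m0 - (std::cos(r) + sy * std::sin(r))) < 1e-6f);
    assert(std::fabs(m1 - (sx * std::cos(r) + std::sin(r))) < 1e-6f);
    assert(std::fabs(m2 - (sy * std::cos(r) - std::sin(r))) < 1e-6f);
    assert(std::fabs(m3 - (std::cos(r) - sx * std::sin(r))) < 1e-6f);
    return 0;
}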
Example #2
File: TLD.cpp  Project: changjie/BPTLD
//  Trains the classifier on affine warps of the bounding-box patch
//  frame: input frame to take warps from
//  bb: first-frame bounding-box [x, y, width, height]
void bbWarpPatch(IntegralImage *frame, double *bb) {
    // Transformation matrix
    float *m = new float[4];
    
    // Loop through various rotation and skew parameters
    for (float r = -0.1f; r < 0.1f; r += 0.005f) {
        float sine = sin(r);
        float cosine = cos(r);
        
        for (float sx = -0.1f; sx < 0.1f; sx += 0.05f) {
            for (float sy = -0.1f; sy < 0.1f; sy += 0.05f) {
                // Set the transformation matrix
                /*  Rotation matrix * skew matrix =
                    
                    | cos r   sin r | * | 1   sx | = 
                    | -sin r  cos r |   | sy   1 |
                    
                    | cos r + sy * sin r   sx * cos r + sin r |
                    | sy * cos r - sin r   cos r - sx * sin r | */
                m[0] = cosine + sy * sine;
                m[1] = sx * cosine + sine;
                m[2] = sy * cosine - sine;
                m[3] = cosine - sx * sine;
                
                // Create the warp, then train the classifier
                IntegralImage *warp = new IntegralImage();
                warp->createWarp(frame, bb, m);
                classifier->train(warp, 1, 1, (int)bb[2], (int)bb[3], 1);
                delete warp;
            }
        }
    }
    
    delete[] m;
}
Example #3
File: TLD.cpp  Project: changjie/BPTLD
// Initialisation --------------------------------------------------------
// Arguments: frame width, frame height, initial first frame, initial bounding-box
void BpTld_Init(int Width, int Height, IplImage *firstImage, double *firstbb)
{
	// Get image parameters
	frameWidth = Width;
	frameHeight = Height;
	frameSize = (CvSize *)malloc(sizeof(CvSize));
	*frameSize = cvSize(frameWidth, frameHeight);
	IntegralImage *firstFrame = new IntegralImage();
	firstFrame->createFromIplImage(firstImage);
	IplImage *firstFrameIplImage = firstImage;
	double *bb = firstbb;
	initBBWidth = (float)bb[2];
	initBBHeight = (float)bb[3];
	// Initialise the confidence
	confidence = 1.0f;

	// Initialise the classifier, tracker and detector
	srand((unsigned int)time(0));
	classifier = new Classifier(TOTAL_FERNS, TOTAL_NODES, MIN_FEATURE_SCALE, MAX_FEATURE_SCALE);
	tracker = new Tracker(frameWidth, frameHeight, frameSize, firstFrameIplImage, classifier);
	detector = new Detector(frameWidth, frameHeight, bb, classifier);

	// Train the classifier on the initial patch and affine warps of it
	classifier->train(firstFrame, (int)bb[0], (int)bb[1], (int)initBBWidth, (int)initBBHeight, 1);
	bbWarpPatch(firstFrame, bb);
	trainNegative(firstFrame, bb);

	// Free memory
	delete firstFrame;
	// Set the initialised flag
	initialised = true;

	return;
}
Example #4
int Detector::detect(const Mat &image, int max_num, CB_RectT *pt_rects, int &subwin_count)
{
	int image_w = image.cols;
	int image_h = image.rows;

	int feature_types = ptr_model->p_ft_param->getFeatureTypes();
	IntegralImage intg;
	intg.init(image_w, image_h, feature_types);
	intg.compute(image.data);

	int num = detect(intg, max_num, pt_rects, subwin_count);
	return num;
}
int PositiveExtractor::extractSamples(const TrainParamsT *pt_params)
{
	IntegralImage intg;

	_chdir(pt_params->positive_pool_dir.c_str());
	CFileFind file_finder;
    bool is_working = file_finder.FindFile();
	int count = 0;
	while (is_working)
	{
		is_working = file_finder.FindNextFile();
		string file_name = file_finder.GetFileName();
		string file_path = file_finder.GetFilePath();

		if (file_name == "." || file_name == ".."|| file_name == "Thumbs.db")
		{
			continue;
		}

		printf("%s\n", file_path.c_str());

		Mat image = imread(file_path, CV_LOAD_IMAGE_GRAYSCALE);
		if (image.data == NULL)
		{
			continue;
		}

		int width = image.cols;
		int height = image.rows;

		if (width < pt_params->template_w || height < pt_params->template_h)
		{
			continue;
		}
	
		intg.init(width, height, pt_params->feature_type);
		intg.compute(image.data);
		intg.save(pt_params->positive_data_path);
		count++;
 	}
	return count;
}
Example #6
int Detector::preTest(IntegralImage &intg, SubwinInfoT &subwin)
{
	intg.computeSubwinMeanVar(subwin);

	if (subwin.mean >= MAX_MEAN || subwin.mean <= MIN_MEAN)
	{
		return 0;
	}

	if (subwin.var < MIN_VAR || subwin.var > MAX_VAR)
	{
		return 0;
	}
	return 1;
}
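preTest rejects subwindows whose mean or variance falls outside fixed bounds before any features are evaluated. computeSubwinMeanVar itself is not among these examples; a minimal sketch of the standard integral-image computation it presumably performs, assuming one summed-area table of pixel values and one of squared values, each padded with a leading zero row and column (boxSum and subwinMeanVar are hypothetical names):

#include <vector>

// Hypothetical helper: sum over a w x h box at (x, y) from a padded
// summed-area table with the given row stride (stride = image width + 1).
static double boxSum(const std::vector<double> &tbl, int stride,
                     int x, int y, int w, int h) {
    return tbl[(y + h) * stride + (x + w)] - tbl[y * stride + (x + w)]
         - tbl[(y + h) * stride + x] + tbl[y * stride + x];
}

// Hypothetical sketch of the quantities preTest relies on:
// mean = E[X], var = E[X^2] - E[X]^2 over the subwindow.
static void subwinMeanVar(const std::vector<double> &sum,
                          const std::vector<double> &sqsum, int stride,
                          int x, int y, int w, int h,
                          double &mean, double &var) {
    double area = (double)w * h;
    mean = boxSum(sum, stride, x, y, w, h) / area;
    var = boxSum(sqsum, stride, x, y, w, h) / area - mean * mean;
}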
Example #7
void * buildResponseLayer_work(void * threadarg)
{
	struct thread_data * my_data = (struct thread_data *) threadarg;
	int thread_id = my_data->thread_id;
	FastHessian::ResponseLayer * rl = my_data->rl;
	IntegralImage * img = my_data->img;
	int step = my_data->step;                      // step size for this filter
	int b = my_data->b;             // border for this filter
	int l = my_data->l;                   // lobe for this filter (filter size / 3)
	int w = my_data->w;                       // filter size
	float inverse_area = my_data->inverse_area;       // normalisation factor
	float Dxx, Dyy, Dxy;

	unsigned int start = (rl->height / NUM_THREADS) * thread_id;
	unsigned int end = (rl->height / NUM_THREADS) + start;
	// Give the remainder rows to the last thread so no row is skipped
	if (thread_id == NUM_THREADS - 1) end = rl->height;

	for (int r, c, ar = start, index = start * rl->width; ar < end; ++ar)
	{
		for (int ac = 0; ac < rl->width; ++ac, index++)
		{

			// get the image coordinates
			r = ar * step;
			c = ac * step;

			// Compute response components
			Dxx = img->BoxIntegral(r - l + 1, c - b, 2 * l - 1, w)
			    - img->BoxIntegral(r - l + 1, c - l / 2, 2 * l - 1, l) * 3;
			Dyy = img->BoxIntegral(r - b, c - l + 1, w, 2 * l - 1)
			    - img->BoxIntegral(r - l / 2, c - l + 1, l, 2 * l - 1) * 3;
			Dxy = + img->BoxIntegral(r - l, c + 1, l, l)
			      + img->BoxIntegral(r + 1, c - l, l, l)
			      - img->BoxIntegral(r - l, c - l, l, l)
			      - img->BoxIntegral(r + 1, c + 1, l, l);

			// Normalise the filter responses with respect to their size
			Dxx *= inverse_area;
			Dyy *= inverse_area;
			Dxy *= inverse_area;

			// Get the determinant of hessian response & laplacian sign
			rl->responses[index] = (Dxx * Dyy - (float)0.81 * Dxy * Dxy);
			rl->laplacian[index] = (unsigned char)(Dxx + Dyy >= 0 ? 1 : 0);
		}
	}
	return NULL;
}
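buildResponseLayer_work fills only its own horizontal slice of the response layer, so the caller has to spawn one worker per slice and join them all. The spawning code is not among these examples; a hypothetical launcher, assuming the thread_data struct read above and POSIX threads:

#include <pthread.h>

void buildResponseLayer_threaded(FastHessian::ResponseLayer *rl, IntegralImage *img,
                                 int step, int b, int l, int w, float inverse_area) {
    pthread_t threads[NUM_THREADS];
    struct thread_data args[NUM_THREADS];

    for (int t = 0; t < NUM_THREADS; ++t) {
        args[t].thread_id = t;
        args[t].rl = rl;
        args[t].img = img;
        args[t].step = step;
        args[t].b = b;
        args[t].l = l;
        args[t].w = w;
        args[t].inverse_area = inverse_area;
        pthread_create(&threads[t], NULL, buildResponseLayer_work, &args[t]);
    }
    // Join so the response layer is complete before anyone reads it
    for (int t = 0; t < NUM_THREADS; ++t)
        pthread_join(threads[t], NULL);
}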
Example #8
void teste_integral_2(IntegralImage ii){
    for(register int i=0;i<4;++i){
        for(register int j=0;j<4;++j){
            printf("%lu ",ii.data()[i][j]);     
        }
        printf("\n");
    }

    Point ardis;
    ardis.y = 24;
    ardis.x = 24;

    MaskTwoHorizontalFactory m2hf(ardis,1,1,0,2,1);
    MaskTwoVerticalFactory m2vf(ardis,1,1,0,1,2);
    MaskThreeHorizontalFactory m3hf(ardis,1,1,0,3,1);
    MaskThreeVerticalFactory m3vf(ardis,1,1,0,1,3);
    MaskDiagonalFactory mdf(ardis,1,1,0,2,2);

    FMF factories[5];
    std::vector<FeatureMask> _facesFeatures;
    factories[0] = m2hf;
    factories[1] = m2vf;
    factories[2] = m3hf;
    factories[3] = m3vf;
    factories[4] = mdf;
    
    int counter=0;
    for(int i=0;i<5;i++){
        while( factories[i].hasNext()==1 ){
            _facesFeatures.push_back( factories[i].next(counter++) );
        }        
    }    
    
    printf("%d\n", _facesFeatures.size());

    FeatureMask fm = m2hf.next();
    printf("M2HF: %lu\n",ii.filter( fm ) );

    fm = m2vf.next();
    printf("M2VF: %lu\n",ii.filter( fm ) );

    fm = m3hf.next();
    printf("M3HF: %lu\n",ii.filter( fm ) );

    fm = m3vf.next();
    printf("M3VF: %lu\n",ii.filter( fm ) ); 

    fm = mdf.next();
    printf("MDF: %lu\n",ii.filter( fm ) );  
}
Example #9
/*  Entry point for mex.
    General call form: [left-hand-side outputs] = TLD(right-hand-side arguments)
    Use either:
    To initialise:
        TLD(frame width, frame height, first frame, selected bounding-box)
    To process a frame:
        new trajectory bounding-box = TLD(current frame, trajectory bounding-box)
    
    nlhs: number of left-hand side outputs
    plhs: the left-hand side outputs
    nrhs: number of right-hand side arguments
    prhs: the right-hand side arguments */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // Initialisation --------------------------------------------------------
    if (nlhs == 0 && nrhs == 4) {
        // Get input
        frameWidth = (int)*mxGetPr(prhs[0]);
        frameHeight = (int)*mxGetPr(prhs[1]);
        frameSize = (CvSize *)malloc(sizeof(CvSize));
        *frameSize = cvSize(frameWidth, frameHeight);
        IntegralImage *firstFrame = new IntegralImage();
        firstFrame->createFromMatlab(prhs[2]);
        IplImage *firstFrameIplImage = imageFromMatlab(prhs[2]);
        double *bb = mxGetPr(prhs[3]);
        initBBWidth = (float)bb[2];
        initBBHeight = (float)bb[3];
        confidence = 1.0f;
        
        // Initialise classifier, tracker and detector
        srand((unsigned int)time(0));
        classifier = new Classifier(TOTAL_FERNS, TOTAL_NODES, MIN_FEATURE_SCALE, MAX_FEATURE_SCALE);
        tracker = new Tracker(frameWidth, frameHeight, frameSize, firstFrameIplImage, classifier);
        detector = new Detector(frameWidth, frameHeight, bb, classifier);
        
        // Train the classifier on the bounding-box patch and warps of it
        classifier->train(firstFrame, (int)bb[0], (int)bb[1], (int)initBBWidth, (int)initBBHeight, 1);
        bbWarpPatch(firstFrame, bb);
        trainNegative(firstFrame, bb);
        
        // Free memory and set initialised
        delete firstFrame;
        initialised = true;
        
        return;
    }
    
    
    // Validate --------------------------------------------------------------
    // The remainder of this function handles the frame processing call
    // Ensure we get the correct call form Matlab and are initialised
    if (!initialised || nlhs != 1 || nrhs != 2) {
        // Error
        return;
    }
    
    
    // Get Input -------------------------------------------------------------
    // Current frame
    IplImage *nextFrame = imageFromMatlab(prhs[0]);
    IntegralImage *nextFrameIntImg = new IntegralImage();
    nextFrameIntImg->createFromMatlab(prhs[0]);
    
    // Trajectory bounding-box [x, y, width, height]
    double *bb = mxGetPr(prhs[1]);
    
    
    // Track and Detect ------------------------------------------------------
    // Only track if we were confident enough in the previous iteration
    // The tracker handles the memory freeing of nextFrame from here on
    double *tbb;
    vector<double *> *dbbs;
    
    if (confidence > MIN_TRACKING_CONF) {
        tbb = tracker->track(nextFrame, nextFrameIntImg, bb);
        dbbs = detector->detect(nextFrameIntImg, tbb);
    } else {
        dbbs = detector->detect(nextFrameIntImg, NULL);
        tracker->setPrevFrame(nextFrame);
        tbb = new double[5];
        tbb[0] = 0;
        tbb[1] = 0;
        tbb[2] = 0;
        tbb[3] = 0;
        tbb[4] = MIN_TRACKING_CONF;
    }
    
    
    // Learn -----------------------------------------------------------------
    // Get greatest detected patch confidence
    double dbbMaxConf = 0.0f;
    int dbbMaxConfIndex = -1;
    
    for (int i = 0; i < dbbs->size(); i++) {
        double dbbConf = dbbs->at(i)[4];
        
        if (dbbConf > dbbMaxConf) {
            dbbMaxConf = dbbConf;
            dbbMaxConfIndex = i;
        }
    }
    
    // Reset the tracker bounding-box if a detected patch had highest
    // confidence and is more confident than MIN_REINIT_CONF
    if (dbbMaxConf > tbb[4] && dbbMaxConf > MIN_REINIT_CONF) {
        delete[] tbb;
        tbb = new double[5];
        double *dbb = dbbs->at(dbbMaxConfIndex);
        tbb[0] = dbb[0];
        tbb[1] = dbb[1];
        tbb[2] = dbb[2];
        tbb[3] = dbb[3];
        tbb[4] = dbb[4];
    }
    
    // Apply constraints if the tracked patch had the greatest confidence and
    // we were confident enough last frame
    else if (tbb[4] > dbbMaxConf && confidence > MIN_LEARNING_CONF) {
        for (int i = 0; i < dbbs->size(); i++) {
            // Train the classifier on positive (overlapping with tracked
            // patch) and negative (classed as positive but non-overlapping)
            // patches
            double *dbb = dbbs->at(i);
            
            if (dbb[5] == 1) {
                classifier->train(nextFrameIntImg, (int)dbb[0], (int)dbb[1], (int)dbb[2], (int)dbb[3], 1);
            }
            else if (dbb[5] == 0) {
                classifier->train(nextFrameIntImg, (int)dbb[0], (int)dbb[1], (int)dbb[2], (int)dbb[3], 0);
            }
        }
    }
    
    // Set confidence for next iteration
    confidence = tbb[4];
    
    
    // Set output ------------------------------------------------------------
    // We output a list of bounding-boxes; the first bounding-box defines the
    // tracked patch, the rest are detected positive match patches.
    // Rows correspond to individual bounding boxes
    // Columns correspond to [x, y, width, height, confidence, overlapping]
    int bbCount = (int)dbbs->size() + 1;
    plhs[0] = mxCreateDoubleMatrix(bbCount, 6, mxREAL);
    double *outputBBs = mxGetPr(plhs[0]);
    
    // Set the tracked bounding-box
    outputBBs[0 * bbCount] = tbb[0];
    outputBBs[1 * bbCount] = tbb[1];
    outputBBs[2 * bbCount] = tbb[2];
    outputBBs[3 * bbCount] = tbb[3];
    outputBBs[4 * bbCount] = tbb[4];
    outputBBs[5 * bbCount] = 0;
    
    // Set detected bounding-boxes
    for (int i = 1; i < bbCount; i++) {
        double *bb = dbbs->at(i - 1);
        outputBBs[0 * bbCount + i] = bb[0];
        outputBBs[1 * bbCount + i] = bb[1];
        outputBBs[2 * bbCount + i] = bb[2];
        outputBBs[3 * bbCount + i] = bb[3];
        outputBBs[4 * bbCount + i] = bb[4];
        outputBBs[5 * bbCount + i] = bb[5];
        delete[] bb;
    }
    
    // Free memory
    delete[] tbb;   // tbb was allocated with new double[], not malloc
    dbbs->clear();
    delete nextFrameIntImg;
}
void videocallback(IplImage *image)
{
    static IplImage *img_grad = NULL;
    static IplImage *img_gray = NULL;
    static IplImage *img_ver = NULL;
    static IplImage *img_hor = NULL;
    static IplImage *img_canny = NULL;
    static IntegralImage integ;
    static IntegralGradient grad;

    assert(image);
    if (img_gray == NULL) {
        // Following image is toggled visible using key '0'
        img_grad = CvTestbed::Instance().CreateImageWithProto("Gradient", image);
        CvTestbed::Instance().ToggleImageVisible(0);
        img_gray = CvTestbed::Instance().CreateImageWithProto("Grayscale", image, 0, 1);
        img_ver = CvTestbed::Instance().CreateImage("Vertical", cvSize(1,image->height), IPL_DEPTH_8U, 1);
        img_hor = CvTestbed::Instance().CreateImage("Horizontal", cvSize(image->width,1), IPL_DEPTH_8U, 1);
        img_canny = CvTestbed::Instance().CreateImageWithProto("Canny", image, 0, 1);
        img_canny->origin = img_ver->origin = img_hor->origin = image->origin;
    }
    if (image->nChannels > 1) { 
        cvCvtColor(image, img_gray, CV_RGB2GRAY);
    } else {
        cvCopy(image, img_gray);
    }

    // Show PerformanceTimer
    //PerformanceTimer timer;
    //timer.Start();

    // Update the integral images
    integ.Update(img_gray);
    grad.Update(img_gray);

    // Whole image projections
    integ.GetSubimage(cvRect(0,0,image->width,image->height), img_ver);
    integ.GetSubimage(cvRect(0,0,image->width,image->height), img_hor);
    for (int y=1; y<image->height; y++) {
        cvLine(image, 
               cvPoint(int(cvGet2D(img_ver, y-1, 0).val[0]), y-1), 
               cvPoint(int(cvGet2D(img_ver, y, 0).val[0]), y), 
               CV_RGB(255,0,0));
    }
    for (int x=1; x<image->width; x++) {
        cvLine(image, 
               cvPoint(x-1, int(cvGet2D(img_hor, 0, x-1).val[0])), 
               cvPoint(x, int(cvGet2D(img_hor, 0, x).val[0])), 
               CV_RGB(0,255,0));
    }

    // Gradients
    // Mark gradients for 4x4 sub-blocks
    /*
    cvZero(img_grad);
    CvRect r = {0,0,4,4};
    for (int y=0; y<image->height/4; y++) {
        r.y = y*4;
        for (int x=0; x<image->width/4; x++) {
            r.x = x*4;
            double dirx, diry;
            grad.GetAveGradient(r, &dirx, &diry);
            cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(dirx),r.y+2+int(diry)), CV_RGB(255,0,0));
        }
    }
    */

    // Gradients on canny
    cvZero(img_grad);
    static int t1=64, t2=192;
    cvCreateTrackbar("t1", "Gradient", &t1, 255, NULL);
    cvCreateTrackbar("t2", "Gradient", &t2, 255, NULL);
    cvCanny(img_gray, img_canny, t1, t2);
    CvRect r = {0,0,4,4};
    for (r.y=0; r.y<img_canny->height-4; r.y++) {
        for (r.x=0; r.x<img_canny->width-4; r.x++) {
            if (img_canny->imageData[r.y*img_canny->widthStep+r.x]) {
                double dirx, diry;
                grad.GetAveGradient(r, &dirx, &diry);
                cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(dirx),r.y+2+int(diry)), CV_RGB(0,0,255));
                cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(-diry),r.y+2+int(+dirx)), CV_RGB(255,0,0));
                cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(+diry),r.y+2+int(-dirx)), CV_RGB(255,0,0));
            }
        }
    }

    // Show PerformanceTimer
    //cout<<"Processing: "<<1.0 / timer.Stop()<<" fps"<<endl;
}
float HaarFeature::computeFeature(IntegralImage &intg, const SubwinInfoT &subwin, const HaarFeatureInfoT &haar)
{
	int cur_scan_pos_x = subwin.win_pos.x;
	int cur_scan_pos_y = subwin.win_pos.y;

	double cur_scan_scale = (double)subwin.win_size.x / haar.tpl_size.x;
	HaarFeatureInfoT haar_feature = haar;

	haar_feature.pos1.x = haar_feature.pos1.x * cur_scan_scale;
	haar_feature.pos1.y = haar_feature.pos1.y * cur_scan_scale;
	haar_feature.pos2.x = haar_feature.pos2.x * cur_scan_scale;
	haar_feature.pos2.y = haar_feature.pos2.y * cur_scan_scale;
	haar_feature.size.x *= cur_scan_scale;
	haar_feature.size.y *= cur_scan_scale;

	double real_scale_square = (double)(haar_feature.size.x * haar_feature.size.y) / (haar.size.x * haar.size.y);

	double result = 0.0;   // initialise so the default case is well-defined
	switch (haar_feature.type)
	{
	case HFT_X_AB:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middle = x_left + haar_feature.size.x - 1;
			int x_right = x_middle + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middle+1, x_right, y_top, y_bottom};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_Y_AB:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middle = y_top + haar_feature.size.y - 1;
			int y_bottom = y_middle + haar_feature.size.y;
		
			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middle+1, y_bottom};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_X_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_top, y_bottom};

			double result1 = intg.getRectValue_0(rect1); 
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 3 * result2;
			break;
		}
	case HFT_Y_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middleT + 1, y_middleB};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 3 * result2;
			break;
		}
	case HFT_X_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x * 2;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_top, y_bottom};

			double result1 = intg.getRectValue_0(rect1); 
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_Y_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y * 2;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middleT + 1, y_middleB};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_XY_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_middleT + 1, y_middleB};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 9 * result2;
			break;
		}
	case HFT_XY_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x * 2;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y * 2;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_middleT + 1, y_middleB};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - 4 * result2;
			break;
		}
	case HFT_L_AB:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.left.x;
			y = rect1.left.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);

			result = result1 - result2;
			break;
		}
	case HFT_R_AB:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.right.x;
			y = rect1.right.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);

			result = result1 - result2;
			break;
		}
	case HFT_L_ABA:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.left.x;
			y = rect1.left.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			x = rect2.left.x;
			y = rect2.left.y;
			CB_SlantT slant3 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect3;
			slantToRect(slant3, rect3, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);
			double result3 = intg.getRectValue_45(rect3);

			result = result1 + result3 - 2 * result2;
			break;
		}
	case HFT_R_ABA:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.right.x;
			y = rect1.right.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			x = rect2.right.x;
			y = rect2.right.y;
			CB_SlantT slant3 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect3;
			slantToRect(slant3, rect3, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);
			double result3 = intg.getRectValue_45(rect3);

			result = result1 + result3 - 2 * result2;
			break;
		}
	case HFT_L_ABBA:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.left.x;
			y = rect1.left.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x * 2, haar_feature.size.y};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			x = rect2.left.x;
			y = rect2.left.y;
			CB_SlantT slant3 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect3;
			slantToRect(slant3, rect3, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);
			double result3 = intg.getRectValue_45(rect3);

			result = result1 + result3 - result2;
			break;
		}
	case HFT_R_ABBA:
		{
			int x = cur_scan_pos_x + haar_feature.pos1.x;
			int y = cur_scan_pos_y + haar_feature.pos1.y;
			CB_SlantT slant1 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect1;
			slantToRect(slant1, rect1, subwin.image_size);

			x = rect1.right.x;
			y = rect1.right.y;
			CB_SlantT slant2 = {x, y, haar_feature.size.x, haar_feature.size.y * 2};
			CB_RectangleT rect2;
			slantToRect(slant2, rect2, subwin.image_size);

			x = rect2.right.x;
			y = rect2.right.y;
			CB_SlantT slant3 = {x, y, haar_feature.size.x, haar_feature.size.y};
			CB_RectangleT rect3;
			slantToRect(slant3, rect3, subwin.image_size);

			double result1 = intg.getRectValue_45(rect1);
			double result2 = intg.getRectValue_45(rect2);
			double result3 = intg.getRectValue_45(rect3);

			result = result1 + result3 - result2;
			break;
		}
	case HFT_A_B:
		{
			int x_left1 = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right1 = x_left1 + haar_feature.size.x - 1;
			int y_top1 = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom1 = y_top1 + haar_feature.size.y - 1;

			int x_left2 = cur_scan_pos_x + haar_feature.pos2.x;
			int x_right2 = x_left2 + haar_feature.size.x - 1;
			int y_top2 = cur_scan_pos_y + haar_feature.pos2.y;
			int y_bottom2 = y_top2 + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left1, x_right1, y_top1, y_bottom1};
			CB_RectT rect2 = {x_left2, x_right2, y_top2, y_bottom2};

			double result1 = intg.getRectValue_0(rect1);
			double result2 = intg.getRectValue_0(rect2);
			result = result1 - result2;
			break;
		}
	case HFT_SQ_X_AB:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middle = x_left + haar_feature.size.x - 1;
			int x_right = x_middle + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middle+1, x_right, y_top, y_bottom};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_SQ_Y_AB:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middle = y_top + haar_feature.size.y - 1;
			int y_bottom = y_middle + haar_feature.size.y;
		
			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middle+1, y_bottom};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_SQ_X_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_top, y_bottom};

			double result1 = intg.getRectSqValue_0(rect1); 
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 3 * result2;
			break;
		}
	case HFT_SQ_Y_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middleT + 1, y_middleB};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 3 * result2;
			break;
		}
	case HFT_SQ_X_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x * 2;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom = y_top + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_top, y_bottom};

			double result1 = intg.getRectSqValue_0(rect1); 
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_SQ_Y_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right = x_left + haar_feature.size.x - 1;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y * 2;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_left, x_right, y_middleT + 1, y_middleB};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 2 * result2;
			break;
		}
	case HFT_SQ_XY_ABA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_middleT + 1, y_middleB};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 9 * result2;
			break;
		}
	case HFT_SQ_XY_ABBA:
		{
			int x_left = cur_scan_pos_x + haar_feature.pos1.x;
			int x_middleL = x_left + haar_feature.size.x - 1;
			int x_middleR = x_middleL + haar_feature.size.x * 2;
			int x_right = x_middleR + haar_feature.size.x;
			int y_top = cur_scan_pos_y + haar_feature.pos1.y;
			int y_middleT = y_top + haar_feature.size.y - 1;
			int y_middleB = y_middleT + haar_feature.size.y * 2;
			int y_bottom = y_middleB + haar_feature.size.y;

			CB_RectT rect1 = {x_left, x_right, y_top, y_bottom};
			CB_RectT rect2 = {x_middleL + 1, x_middleR, y_middleT + 1, y_middleB};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - 4 * result2;
			break;
		}
	case HFT_SQ_A_B:
		{
			int x_left1 = cur_scan_pos_x + haar_feature.pos1.x;
			int x_right1 = x_left1 + haar_feature.size.x - 1;
			int y_top1 = cur_scan_pos_y + haar_feature.pos1.y;
			int y_bottom1 = y_top1 + haar_feature.size.y - 1;

			int x_left2 = cur_scan_pos_x + haar_feature.pos2.x;
			int x_right2 = x_left2 + haar_feature.size.x - 1;
			int y_top2 = cur_scan_pos_y + haar_feature.pos2.y;
			int y_bottom2 = y_top2 + haar_feature.size.y - 1;

			CB_RectT rect1 = {x_left1, x_right1, y_top1, y_bottom1};
			CB_RectT rect2 = {x_left2, x_right2, y_top2, y_bottom2};

			double result1 = intg.getRectSqValue_0(rect1);
			double result2 = intg.getRectSqValue_0(rect2);
			result = result1 - result2;
			break;
		}
	default:
		break;
	}
	result *= haar_feature.inv_area;
	result = result / subwin.var;
	result /= real_scale_square;

	if (haar_feature.type < HFT_STEP1 && haar.is_abs == 1)
	{
		result = fabs(result);
	}
	return result;
}
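Every case above uses the same rectangle-saving trick: rect1 spans the whole feature support while rect2 covers only the subtracted region, so a two-rectangle feature A - B is evaluated as (A + B) - 2 * B, a three-rectangle one as (A1 + B + A2) - 3 * B, and the centre-surround XY cases follow the same pattern with weights 9 and 4. A tiny standalone check of the two-rectangle identity (arbitrary sample sums, not project code):

#include <cassert>

int main() {
    // A and B stand for integral-image sums over the two halves of the
    // feature support; the values are arbitrary.
    double A = 17.0, B = 5.0;
    double result1 = A + B;  // lookup over the whole support (rect1)
    double result2 = B;      // lookup over the subtracted half (rect2)
    // (A + B) - 2B == A - B: one box lookup saved per evaluation
    assert(result1 - 2 * result2 == A - B);
    return 0;
}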
Example #12
void GlutOrientation::Apply(ImageOf<PixelRgb>& src,
				ImageOf<PixelRgb>& dest,
				ImageOf<PixelFloat>& xdata, 
				ImageOf<PixelFloat>& ydata, 
				ImageOf<PixelFloat>& mdata)
{
  int view = 1; //(dest.width()>0);
  ImageOf<PixelFloat>& orient = fine_orientation_data.Orient();
  static ImageOf<PixelMono> mask;
  static IntegralImage ii;
  if (view)
    {
      dest.resize(src);
    }
  for (int i=0;i<SHIFTS;i++)
    {
      shifts[i].resize(src);
    }
  mean.resize(src);

  static int shifts_x[SHIFTS], shifts_y[SHIFTS];
  
  int ct = 0;
  for (int dx=-1; dx<=2; dx++)
    {
      for (int dy=-1; dy<=2; dy++)
	{
	  assert(ct<SHIFTS);
	  ii.Offset(src,shifts[ct],dx,dy,1);
	  shifts_x[ct] = dx;
	  shifts_y[ct] = dy;
	  ct++;
	}
    }

  static ImageOf<PixelFloat> mean, var, lx, ly, agree;
  ImageOf<PixelRgb>& mono = src;
  SatisfySize(mono,mean);
  SatisfySize(mono,var);
  SatisfySize(mono,lx);
  SatisfySize(mono,ly);
  SatisfySize(mono,agree);
  SatisfySize(mono,xdata);
  SatisfySize(mono,ydata);
  SatisfySize(mono,mdata);
  int response_ct = 0;
  IMGFOR(mono,x,y)
    {
      float total = 0;
      float total2 = 0;
      PixelRgb& pix0 = src(x,y);
      for (int k=0; k<SHIFTS; k++)
	{
	  PixelRgb& pix1 = shifts[k](x,y);
	  float v = pdist(pix0,pix1);
	  total += v;
#ifdef USE_LUMINANCE_FILTER
	  total2 += v*v;
#endif
	}
      mean(x,y) = total/SHIFTS;
#ifdef USE_LUMINANCE_FILTER
      var(x,y) = total2/SHIFTS - (total/SHIFTS)*(total/SHIFTS);
#endif
    }
Example #13
File: TLD.cpp  Project: changjie/BPTLD
/*  MEX entry point.
    Two usage forms:
    To initialise:
        TLD(frame width, frame height, first frame, selected bounding-box)
    To process a frame:
        new trajectory bounding-box = TLD(current frame, trajectory bounding-box)
*/
void BpTld_Process(IplImage *NewImage, double *ttbb, double *outPut) {
    
    // Get input -------------------------------------------------------------
    // Current frame
    IplImage *nextFrame = NewImage;
    IntegralImage *nextFrameIntImg = new IntegralImage();
    nextFrameIntImg->createFromIplImage(NewImage);
    
    // Trajectory bounding-box [x, y, width, height]
    double *bb = ttbb;
    
    
    // Track and Detect ------------------------------------------------------
    // Only track if we were confident enough in the previous iteration
    // From here on, the tracker handles the memory freeing of nextFrame
    double *tbb;
    vector<double *> *dbbs;
    
    if (confidence > MIN_TRACKING_CONF) {
        tbb = tracker->track(nextFrame, nextFrameIntImg, bb);
		if (tbb[4] > MIN_TRACKING_CONF)
		{
			dbbs = detector->detect(nextFrameIntImg, tbb);
		} 
		else
		{
			dbbs = detector->detect(nextFrameIntImg, NULL);
		}
        
    } else {
        dbbs = detector->detect(nextFrameIntImg, NULL);
        tracker->setPrevFrame(nextFrame);
        tbb = new double[5];
        tbb[0] = 0;
        tbb[1] = 0;
        tbb[2] = 0;
        tbb[3] = 0;
        tbb[4] = MIN_TRACKING_CONF;
    }
    
    
    // Learn -----------------------------------------------------------------
    // Get the greatest detected patch confidence
    double dbbMaxConf = 0.0f;
    int dbbMaxConfIndex = -1;
    
    for (int i = 0; i < dbbs->size(); i++) {
        double dbbConf = dbbs->at(i)[4];
        
        if (dbbConf > dbbMaxConf) {
            dbbMaxConf = dbbConf;
            dbbMaxConfIndex = i;
        }
    }
 //   //
	//if (dbbMaxConfIndex >= 0)
	//{	testDetector.x = dbbs->at(dbbMaxConfIndex)[0];
	//testDetector.y = dbbs->at(dbbMaxConfIndex)[1];
	//testDetector.width = dbbs->at(dbbMaxConfIndex)[2];
	//testDetector.height = dbbs->at(dbbMaxConfIndex)[3];
	//}

	////
    // Reset the tracker bounding-box if a detected patch had the highest
    // confidence and is more confident than MIN_REINIT_CONF
    if (dbbMaxConf > tbb[4] && dbbMaxConf > MIN_REINIT_CONF) {
        delete[] tbb;
        tbb = new double[5];
        double *dbb = dbbs->at(dbbMaxConfIndex);
        tbb[0] = dbb[0];
        tbb[1] = dbb[1];
        tbb[2] = dbb[2];
        tbb[3] = dbb[3];
        tbb[4] = dbb[4];
    }
    
    // Apply constraints if the tracked patch had the greatest confidence and
    // we were confident enough in the last frame
    else if (tbb[4] > dbbMaxConf && confidence > MIN_LEARNING_CONF) {
        for (int i = 0; i < dbbs->size(); i++) {
            // Train the classifier on positive (overlapping with the tracked
            // patch) and negative (classed as positive but non-overlapping)
            // patches
            double *dbb = dbbs->at(i);
            
            if (dbb[5] == 1) {
                classifier->train(nextFrameIntImg, (int)dbb[0], (int)dbb[1], (int)dbb[2], (int)dbb[3], 1);
            }
            else if (dbb[5] == 0) {
                classifier->train(nextFrameIntImg, (int)dbb[0], (int)dbb[1], (int)dbb[2], (int)dbb[3], 0);
            }
        }
    }
    
    // Set the confidence for the next iteration
    confidence = tbb[4];

    // Set output
	outPut[0] = tbb[0];
	outPut[1] = tbb[1];
	outPut[2] = tbb[2];
	outPut[3] = tbb[3];

    
    
    //////////////////////////////////////////////////////////////////////////
    // Here tbb[0], tbb[1], tbb[2], tbb[3] form the final bounding-box;
    // if tbb[2] and tbb[3] are both greater than 0, an object position
    // was estimated
    //////////////////////////////////////////////////////////////////////////
    // Set output ------------------------------------------------------------
    // We output a list of bounding-boxes; the first is the tracked patch,
    // the rest are detected positive match patches
    // Rows correspond to individual bounding boxes
    // Columns correspond to [x, y, width, height, confidence, overlapping]
    
    // Free memory
    delete[] tbb;   // tbb was allocated with new double[], not malloc
    dbbs->clear();
    delete nextFrameIntImg;
}
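A hypothetical driver for the BpTld_Init / BpTld_Process pair above, assuming OpenCV's legacy C capture API (cvCaptureFromCAM, cvQueryFrame); the project's real caller is not among these examples, and the camera index and initial box are placeholders:

#include <opencv/highgui.h>

int main() {
    CvCapture *cap = cvCaptureFromCAM(0);
    IplImage *frame = cvQueryFrame(cap);

    double bb[4] = { 100, 100, 50, 50 };  // placeholder first-frame box [x, y, w, h]
    BpTld_Init(frame->width, frame->height, frame, bb);

    double out[4];
    while ((frame = cvQueryFrame(cap)) != NULL) {
        BpTld_Process(frame, bb, out);
        // Feed the new trajectory box back in for the next frame
        for (int i = 0; i < 4; i++) bb[i] = out[i];
    }
    cvReleaseCapture(&cap);
    return 0;
}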
Example #14
int main(int argc, char **argv) {

  if(argc!=3){
    printf("Usage: imageFindSoma img soma.gr\n");
    exit(0);
  }

  string nameImg(argv[1]);
  string nameGraph(argv[2]);

  Image<float> * img  = new Image<float>(nameImg);
  IntegralImage* iimg = new IntegralImage(img);

  // Image<float>* res1 = img->create_blank_image_float("/media/neurons/findSoma/n7s1.png");
  // Image<float>* res2 = res1->create_blank_image_float("/media/neurons/findSoma/n7s2.png");
  // Image<float>* res  = res2->create_blank_image_float("/media/neurons/findSoma/n7s.png");

  // And now find the soma (a simple mean-intensity minimum over step x step boxes)
  int step = 30;
  // res1->put_all(255);
  // res2->put_all(255);
  // res ->put_all(255);
  int xS = 0, yS = 0;
  float value;
  float minValue = 255;

  printf("Doing the first pass\n");
  // Include the displacement offset in the search grid
  for(int disp = 0; disp < step; disp+=5){
    printf("disp = %i\n", disp);
    //Initial search
    for(int y0 = disp; y0 < img->height - step; y0+=step){
      for(int x0 = disp; x0 < img->width - step; x0+=step){
        value = iimg->integral(x0,y0,x0+step,y0+step)/(step*step);
        if(value < minValue){
          minValue = value;
          xS = x0;
          yS = y0;
        }
        // for(int i = x0; i <= x0+step; i++)
          // for(int j = y0; j <= y0+step; j++)
            // res1->put(i,j,value);
      }
    }
  }

  // printf("Saving the result\n");
  // for(int x = xS; x < xS+step; x++)
    // for(int y = yS; y < yS+step; y++){
      // res->put(x,y,0);
    // }

  printf("Saving the images\n");
  // iimg->save();
  // res1->save();
  // res2->save();
  // res->save();

  //And now the graph with the contour will be saved
  printf("Saving the graph\n");
  Graph<Point2Do, EdgeW<Point2Do> >* gr =
    new Graph<Point2Do, EdgeW<Point2Do> >();
  int x, y;
  y = yS;
  vector<float> smicrom(2);
  vector<int> sindex(2);
  int stepContour = 3;
  sindex[0] = xS; sindex[1]=yS;
  img->indexesToMicrometers(sindex, smicrom);
  for(x = 0; x <= step; x+=stepContour){
    gr->cloud->points.push_back
      (new Point2Do(smicrom[0]+x, smicrom[1], M_PI/2));
  }
  for(y = 1; y <= step-1; y+=stepContour){
    gr->cloud->points.push_back
      (new Point2Do(smicrom[0]+step, smicrom[1]-y, 0));
  }
  for(x = step; x>=0; x-=stepContour){
    gr->cloud->points.push_back
      (new Point2Do(smicrom[0]+x, smicrom[1]-step, M_PI/2));
  }
  for(y = step-1; y>=1; y-=stepContour){
    gr->cloud->points.push_back
      (new Point2Do(smicrom[0], smicrom[1]-y, 0));
  }
  for(int i = 0; i < gr->cloud->points.size()-1; i++)
    gr->eset.edges.push_back
      (new EdgeW<Point2Do>(&gr->cloud->points, i, i+1, 1));
  gr->eset.edges.push_back
    (new EdgeW<Point2Do>(&gr->cloud->points, 0, gr->cloud->points.size()-1, 1));
  gr->v_radius = 0.2;
  gr->cloud->v_radius = 0.2;
  gr->saveToFile(nameGraph);

  return 0;
}