Example #1
RawImage* DisparityMap::ProcessInput(CommandLineArgModel* arg, RawImage* image)
{
	DisparityMapModel* model = (DisparityMapModel*)arg->ParsedModel;
	Rectangle left, roi = model->Roi, right;

	left.Width = roi.Width / 2;
	right.Width = roi.Width / 2;

	left.X = roi.X;
	right.X = roi.X + left.Width;

	left.Right = roi.Right - right.Width;
	right.Right = roi.Right;

	left.Height = right.Height = roi.Height;
	left.Bottom = right.Bottom = roi.Bottom;
	left.Y = right.Y = roi.Y;

	Mat leftImg = image->CloneToMat(left);
	Mat rightImg = image->CloneToMat(right);

	cvtColor(leftImg, leftImg, CV_BGR2GRAY);
	cvtColor(rightImg, rightImg, CV_BGR2GRAY);

	Ptr<StereoSGBM> stereo = StereoSGBM::create(
		0,		//minDisparity
		144,	//numDisparities
		3,		//blockSize
		3 * 3 * 4,	//P1
		3 * 3 * 32,	//P2
		1,		//disp12MaxDiff
		10,		//preFilterCap
		10,		//uniquenessRatio
		100,	//speckleWindowSize
		32,		//speckleRange
		true);	//mode (true converts to 1 == StereoSGBM::MODE_HH)

	Mat disparity;
	stereo->compute(rightImg, leftImg, disparity);	// note: StereoMatcher::compute takes (left, right); the images are passed in swapped order here

	double max, min;
	minMaxIdx(disparity, &min, &max);
	convertScaleAbs(disparity, disparity, 255 / max);

	cvtColor(disparity, disparity, CV_GRAY2RGB);

	imwrite("F:\\1\\raw-stereo.disparity.opencv.png", disparity);
	
	RawImage* newImage = new RawImage(left.Width, left.Height);


	newImage->Import(disparity, 0, 0);

	return newImage;
}
Example #2
void cv::minMaxLoc( InputArray _img, double* minVal, double* maxVal,
                Point* minLoc, Point* maxLoc, InputArray mask )
{
    Mat img = _img.getMat();
    CV_Assert(img.dims <= 2);
    
    minMaxIdx(_img, minVal, maxVal, (int*)minLoc, (int*)maxLoc, mask);
    if( minLoc )
        std::swap(minLoc->x, minLoc->y);
    if( maxLoc )
        std::swap(maxLoc->x, maxLoc->y);
}
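The swap above is needed because minMaxIdx reports element positions in (row, col) order, while Point stores (x, y), i.e. (col, row). A minimal usage sketch with made-up values, showing both conventions side by side:

#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat img = (Mat_<uchar>(2, 3) << 1, 2, 3,
                                    9, 5, 6);   // maximum 9 sits at row 1, col 0
    double minV, maxV;
    int maxIdx[2];                              // one index per dimension: {row, col}
    Point maxLoc;                               // x = col, y = row
    minMaxIdx(img, &minV, &maxV, 0, maxIdx);
    minMaxLoc(img, &minV, &maxV, 0, &maxLoc);
    std::cout << "minMaxIdx max at {" << maxIdx[0] << ", " << maxIdx[1] << "}\n";  // {1, 0}
    std::cout << "minMaxLoc max at " << maxLoc << "\n";                            // [0, 1]
    return 0;
}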
Example #3
/*
 * The edge function compares the binarized image with the gradient image through an AND operator.
 * It checks for white pixels in both images and, where both are white, sets the corresponding
 * pixel of the output image to white. (A minimal bitwise_and equivalent is sketched after this function.)
 *
 * @param binarized		the binarized image
 * @param gradient		the gradient image
 *
 * @return edgeImage	the resulting edge image
 */
Mat edge(const Mat& binarized, const Mat& gradient){
	Size s = binarized.size();
	int xmax = s.height;	// number of rows
	int ymax = s.width;		// number of columns

	Mat edgeImage(xmax,ymax, CV_8UC1, Scalar(0));


	for (int i=0; i < xmax; i++){
		for (int j=0; j < ymax; j++){
			//If both pixels are equal and white, then set the output pixel to white (AND operator)
			if(binarized.at<uchar>(i,j) == gradient.at<uchar>(i,j) && binarized.at<uchar>(i,j) == MAX_BRIGHTNESS_8)
				edgeImage.at<uchar>(i,j) = MAX_BRIGHTNESS_8;
			else
				edgeImage.at<uchar>(i,j) = 0;
		}
	}

	double min, max;
	minMaxIdx(edgeImage, &min, &max);
	cout << "Max: " << max << " Min: " << min << endl;
/*
	imwrite( "edge.tiff", edgeImage );
	imwrite( "binary.tiff", binarized );
	imwrite( "gradient.tiff", gradient );

	Mat image;
	binarized.convertTo(image, CV_32SC1);
	imwrite( "image.tiff", image );
*/

	imshow("edgeimage", edgeImage);
	waitKey(0);
	imshow("binarizada", binarized);
	waitKey(0);
	imshow("gradiente", gradient);
	waitKey(0);

	return edgeImage;
}
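When both inputs are strictly binary (every pixel either 0 or MAX_BRIGHTNESS_8 == 255, which this sketch assumes), the pixel-wise loop above collapses to a single bitwise AND:

#include <opencv2/core.hpp>
using namespace cv;

Mat edgeByAnd(const Mat& binarized, const Mat& gradient)
{
	CV_Assert(binarized.type() == CV_8UC1 && binarized.size() == gradient.size());
	Mat edgeImage;
	bitwise_and(binarized, gradient, edgeImage);	// 255 only where both inputs are 255
	return edgeImage;
}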
Example #4
	void SMaxReg::predict(const Mat &data, const Mat &label, Mat &predictLabel,
					      Mat &prob, double &accuracy)
	{
		prob = data * weight;
		predictLabel = Mat::zeros(1, data.rows, CV_32FC1);
		int maxLoc[2] = {0, 0};	// minMaxIdx fills a (row, col) pair
		float *pptr = (float *)predictLabel.data;
		float *lptr = (float *)label.data;
		accuracy = 0;
		for (int i = 0; i < data.rows; ++i) {
			minMaxIdx(prob.row(i), NULL, NULL, NULL, maxLoc);
			*pptr++ = maxLoc[1];
			if ((*lptr++) == maxLoc[1])
				accuracy++;
		}
		accuracy /= data.rows;

	}
Example #5
void SetCard::DetectColor(void)
{
    Mat M;
    Mat C;
    Mat HSV;
    bitwise_and(mCutBW, mCutMask, M);
    //bitwise_and(mCutImg, mCutImg, C, M);
    bitwise_and(mCutNormColors, mCutNormColors, C, M);

    cvtColor(C, HSV, mOptions->mColor2HSV);
    vector<Mat> CHNL(3);
    split(HSV, CHNL);

    vector<int> nGPR(3);
    nGPR[0] = CountNonZeroInRage(CHNL[0],  25, 90);  // green
    nGPR[1] = CountNonZeroInRage(CHNL[0], 140, 170); // purple
    nGPR[2] = CountNonZeroInRage(CHNL[0], 170, 255); // red
    double maxV;
    int maxIdx[2];	// Mat(nGPR) is a 3x1 matrix, so minMaxIdx writes a (row, col) pair
    minMaxIdx(Mat(nGPR), NULL, &maxV, NULL, maxIdx);
    mCardProperties.mColor = (CARD_COLORS)maxIdx[0];
}
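The same argmax pattern, isolated: wrap a std::vector in a Mat and let minMaxIdx locate the largest entry. A minimal sketch with made-up counts; since Mat(counts) is an N x 1 matrix, minMaxIdx writes two indices and maxIdx[0] is the vector index.

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> counts = {12, 47, 3};      // e.g. per-class pixel counts
    double maxV;
    int maxIdx[2];                              // {row, col} of the maximum
    cv::minMaxIdx(cv::Mat(counts), 0, &maxV, 0, maxIdx);
    std::cout << "winning index: " << maxIdx[0] << ", value: " << maxV << std::endl;  // 1, 47
    return 0;
}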
Example #6
// estimate covariance matrix
void LSPredictionComputer::estimate(const Point3i& currentPos) {
	Mat sampleVector, weightedSampleVector;
	context->contextOf(currentPos, sampleVector); // get current neighborhood and store it in sampleVector

	// init covMat
	covMat->create(context->getFullNeighborhood().getNumberOfElements(), context->getFullNeighborhood().getNumberOfElements() + 1, CV_64F);
	*covMat = (*covMat)(Rect(0, 0, sampleVector.cols + 1, sampleVector.cols)); // Rect(x, y, width, height)
	*covMat = Scalar(0.0); // set covariance matrix to zero
	sampleVector.reshape(0, sampleVector.cols).copyTo(covMat->col(covMat->cols - 1)); // put neighborhood in last column for variance estimate
	sampleVector = sampleVector.colRange(0, sampleVector.cols - 1); // remove last (current) pixel

	// other neighborhood is used for matching (weight computation) than for prediction?
	Mat weightingVector;
	if(weightingContext) {
		weightingContext->checkBorder(currentPos);
		if(weightingContext->isBorder()) context->setTrainingregion(weightingContext->getTrainingregion()); // use smaller training region for context
		weightingContext->contextOf(currentPos, weightingVector); // get current matching neighborhood and store it in weightingVector
		weightingVector = weightingVector.colRange(0, weightingVector.cols - 1); // remove last (current) pixel
		otherWeightingFunction->setReferencePoint(weightingVector); // set as reference for block matching to compute weights
		weightingContext->getContextElementsOf(currentPos);
	} else weightingFunction->setReferencePoint(sampleVector); // set as reference for block matching to compute weights

	// init weights
	weights->create(1, context->getFullTrainingregion().getNumberOfElements(), CV_64F); // reset to maximum size (should not need memory re-allocation)
	*weights = weights->colRange(0, context->getTrainingregion().getNumberOfElements()); // set used region
	double* weightsPtr = weights->ptr<double>();

	if(maxTrainingVectors) {
		Mat sampleVectors(maxTrainingVectors, context->getNeighborhood().getNumberOfElements(), CV_64F, Scalar(0.0));
		Mat correspondingWeights(maxTrainingVectors, 1, CV_64F, Scalar(0.0));
		context->getContextElementsOf(currentPos);
		while(!context->getNextContextElement(sampleVector)) {
			if(weightingContext) {
				weightingContext->getNextContextElement(weightingVector);
				*weightsPtr = otherWeightingFunction->computeWeight(weightingVector);
			} else *weightsPtr = weightingFunction->computeWeight(sampleVector); // do weighting for WLS and store weight
			if(*(weightsPtr++)) {
				int index[2];
				minMaxIdx(correspondingWeights, NULL, NULL, index); // position of the currently smallest stored weight
				correspondingWeights.at<double>(index[0], 0) = *(weightsPtr - 1); // overwrite it with the new weight
				sampleVector.copyTo(sampleVectors.row(index[0])); // and store the corresponding sample vector
			}
		}
		double minWeight; minMaxIdx(correspondingWeights, &minWeight, NULL);
		weightsPtr = weights->ptr<double>() - 1;
		for(int i = 0; i < weights->cols; ++i) if(*(++weightsPtr) < minWeight) *weightsPtr = 0; // set small weights to zero
		for(int i = 0; i < maxTrainingVectors; ++i) {
			weightedSampleVector = sampleVectors.row(i) * correspondingWeights.at<double>(i, 0);
			const double* const sampleVectorPtr = sampleVectors.ptr<double>(i);
			const double* const weightedSampleVectorPtr = weightedSampleVector.ptr<double>();
			for(int k = 0; k < covMat->rows; ++k) {
				double* covMatPtr = covMat->ptr<double>(k) + k;
				for(int l = k; l < sampleVector.cols; ++l) *(covMatPtr++) += sampleVectorPtr[k] * weightedSampleVectorPtr[l];
			}
		}
	} else {
		context->getContextElementsOf(currentPos);
		while(!context->getNextContextElement(sampleVector)) {
			if(weightingContext) {
				weightingContext->getNextContextElement(weightingVector);
				weightedSampleVector = sampleVector * (*(weightsPtr++) = otherWeightingFunction->computeWeight(weightingVector));
			} else weightedSampleVector = sampleVector * (*(weightsPtr++) = weightingFunction->computeWeight(sampleVector)); // do weighting for WLS and store weight
			const double* const sampleVectorPtr = sampleVector.ptr<double>();
			const double* const weightedSampleVectorPtr = weightedSampleVector.ptr<double>();
			for(int k = 0; k < covMat->rows; ++k) {
				double* covMatPtr = covMat->ptr<double>(k) + k;
				for(int l = k; l < sampleVector.cols; ++l) *(covMatPtr++) += sampleVectorPtr[k] * weightedSampleVectorPtr[l];
			}
		}
	}
	*covMat = covMat->rowRange(0, covMat->rows - 1); // make last row invisible for computePrediction function of WLS
	for(int k = 1; k < covMat->rows; ++k) { // copy values from upper triangular matrix
		double* covMatPtr = covMat->ptr<double>(k);
		for(int l = 0; l < k; ++l) *(covMatPtr++) = covMat->at<double>(l, k);
	}
} // end LSPredictionComputer::estimate
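The triangular accumulation above builds a weighted scatter (covariance) matrix; ignoring the extra last column kept for the variance estimate, it is conceptually a sum of weighted outer products of the sample vectors. A compact sketch of that idea with hypothetical inputs (samples as 1 x N CV_64F row vectors and their precomputed weights):

#include <opencv2/core.hpp>
#include <vector>
using namespace cv;

Mat weightedScatter(const std::vector<Mat>& samples, const std::vector<double>& weights)
{
	CV_Assert(!samples.empty() && samples.size() == weights.size());
	Mat scatter = Mat::zeros(samples[0].cols, samples[0].cols, CV_64F);
	for (size_t i = 0; i < samples.size(); ++i)
		scatter += weights[i] * samples[i].t() * samples[i];	// weighted outer product of a row vector
	return scatter;
}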
Example #7
    float predict( InputArray _inputs, OutputArray _outputs, int ) const
    {
        if( !trained )
            CV_Error( CV_StsError, "The network has not been trained or loaded" );

        Mat inputs = _inputs.getMat();
        int type = inputs.type(), l_count = layer_count();
        int n = inputs.rows, dn0 = n;

        CV_Assert( (type == CV_32F || type == CV_64F) && inputs.cols == layer_sizes[0] );
        int noutputs = layer_sizes[l_count-1];
        Mat outputs;

        int min_buf_sz = 2*max_lsize;
        int buf_sz = n*min_buf_sz;

        if( buf_sz > max_buf_sz )
        {
            dn0 = max_buf_sz/min_buf_sz;
            dn0 = std::max( dn0, 1 );
            buf_sz = dn0*min_buf_sz;
        }

        cv::AutoBuffer<double> _buf(buf_sz+noutputs);
        double* buf = _buf;

        if( !_outputs.needed() )
        {
            CV_Assert( n == 1 );
            outputs = Mat(n, noutputs, type, buf + buf_sz);
        }
        else
        {
            _outputs.create(n, noutputs, type);
            outputs = _outputs.getMat();
        }

        int dn = 0;
        for( int i = 0; i < n; i += dn )
        {
            dn = std::min( dn0, n - i );

            Mat layer_in = inputs.rowRange(i, i + dn);
            Mat layer_out( dn, layer_in.cols, CV_64F, buf);

            scale_input( layer_in, layer_out );
            layer_in = layer_out;

            for( int j = 1; j < l_count; j++ )
            {
                double* data = buf + ((j&1) ? max_lsize*dn0 : 0);
                int cols = layer_sizes[j];

                layer_out = Mat(dn, cols, CV_64F, data);
                Mat w = weights[j].rowRange(0, layer_in.cols);
                gemm(layer_in, w, 1, noArray(), 0, layer_out);
                calc_activ_func( layer_out, weights[j] );

                layer_in = layer_out;
            }

            layer_out = outputs.rowRange(i, i + dn);
            scale_output( layer_in, layer_out );
        }

        if( n == 1 )
        {
            // outputs is a single row here, so maxIdx[0] is always 0 and
            // maxIdx[0] + maxIdx[1] is simply the column of the strongest output,
            // i.e. the predicted class index.
            int maxIdx[] = {0, 0};
            minMaxIdx(outputs, 0, 0, 0, maxIdx);
            return (float)(maxIdx[0] + maxIdx[1]);
        }

        return 0.f;
    }
Example #8
vector<int> FRID::getFeaturePoints()
{
	int i,j;
	vector<int> r;
	uint _order = getOrder();
	int sz=w4*h4;
	Mat sobelx, gradx, sobely,grady,sobel;
	Mat sobelgray_,sobelgray__,sobelgray;
	int totalPoints;


	Sobel(orderMap[_order],sobelx,CV_16S,1,0,3,1, 0, BORDER_DEFAULT );
	convertScaleAbs( sobelx, gradx );
	Sobel(orderMap[_order],sobely,CV_16S,0,1,3,1, 0, BORDER_DEFAULT );
	convertScaleAbs( sobely, grady );
	addWeighted( gradx, 0.5, grady, 0.5, 0, sobel );

	cvtColor(sobel,sobelgray_,CV_RGB2GRAY);
	sobelgray_.convertTo(sobelgray__,CV_8UC1);

	double min,max;
	int maxid[2];
	minMaxIdx(sobelgray__,&min,&max,0,maxid);

	threshold(sobelgray__,sobelgray__,(int)(((float)max)/255*100),1,THRESH_BINARY);

	totalPoints = sum(sobelgray__)[0];

	cout<<"max: "<<max<<endl;
	cout<<"totalPoints: "<<totalPoints<<endl;

	double br;
	double bg;
	double bb;
	double bsr;
	double bsg;
	double bsb;
	int xmin,xmax,ymin,ymax;
	//Mat bufABSmat;
	//Mat bufABS2mat;
	Mat result1;
	Mat cdmat;
	orderMap[_order].copyTo(cdmat);

	for (i=0; i<sz; i++){
		if (sobelgray__.at<uchar>(i/w4,i%w4) == 0) continue;
		float* bufABS = getFeatureVector(i%w4,i/w4,br,bg,bb,bsr,bsg,bsb);
		Mat bufABSmat ((_order/2-1)*3+3,1,CV_32FC1,bufABS);
		Mat corImg = Mat::zeros(Size(w4,h4),CV_8UC1);
		xmin=w4-1,xmax=0,ymin=h4-1,ymax=0;
		for (j=0; j<sz; j++){
			if (sobelgray__.at<uchar>(j/w4,j%w4) == 0) continue;
			float* bufABS2 = getFeatureVector(j%w4,j/w4,br,bg,bb,bsr,bsg,bsb);
			Mat bufABS2mat ((_order/2-1)*3+3,1,CV_32FC1,bufABS2);
			matchTemplate(bufABSmat, bufABS2mat, result1, CV_TM_CCOEFF_NORMED);

			if (bsr>255 && bsg>255 && bsb>255 && pow(result1.at<float>(0),3)>0.5)  {
				if (xmin > j%w4) xmin = j%w4;
				if (xmax < j%w4) xmax = j%w4;
				if (ymin > j/w4) ymin = j/w4;
				if (ymax < j/w4) ymax = j/w4;
				corImg.at<uchar>(j) = 1;//(result1.at<float>(0) > 0)? pow(result1.at<float>(0),10):0;
				if ((xmax-xmin)>6 && (ymax-ymin)>6)break;
			}
		}
//		namedWindow( "c", CV_WINDOW_AUTOSIZE );
//		imshow( "c", corImg*255);//thrCrCb[0] );
//
//		waitKey(0);

		if ((xmax-xmin)<=6 && (ymax-ymin)<=6) {
			cout<<"xy: "<<i%w4<<", "<<i/w4<<endl;
			cout<<"totalPoints: "<<sum(sobelgray__)[0]<<endl;
			circle( cdmat, Point(i%w4,i/w4), 1, Scalar( 0, 0, 255 ), -1, 8 );
			r.push_back(i%w4);
			r.push_back(i/w4);
		}
		//sobelgray__=sobelgray__-corImg;
	}

	//circle( cdmat, Point(maxid[1],maxid[0]), 1, Scalar( 0, 0, 255 ), -1, 8 );

	return r;
}
Example #9
vector<Box3d*> TableObjectDetector::getHulls(const Mat P, const Mat L, const Mat plane) {
    double K;
    minMaxIdx(L, NULL, &K); // K = highest cluster label in L, so labels run 0..K
    vector<Box3d*> B;
    Mat Rp = determinePlaneRotation(-plane.col(0));
    // We found a rotation from a horizontal plane to our fitted plane
    // We want a translation from our fitted plane to a horizontal plane
    transpose(Rp, Rp);
    Mat Pt; transpose(P, Pt);
    Mat P_rot = Rp*Pt;
        
    for (int k=0; k<=K; k++) {
        double xmin = 1000, ymin = 1000, zmin = 1000;
        double xmax = -1000, ymax = -1000, zmax = -1000;

        for (int i=0; i<P_rot.cols; i++) {
            if (L.at<int>(i)==k) {
                double x = P_rot.at<double>(0, i);
                double y = P_rot.at<double>(1, i);
                double z = P_rot.at<double>(2, i);
                xmin = std::min(xmin, x); xmax = std::max(xmax, x);
                ymin = std::min(ymin, y); ymax = std::max(ymax, y);
                zmin = std::min(zmin, z); zmax = std::max(zmax, z);
            }
        }
        
        zmax += 0.015; // Add the threshold so that the hull touches the plane
        
        Mat Bp(3, 8, CV_64F);
        Bp.at<double>(0, 0) = xmin; Bp.at<double>(1, 0) = ymin; Bp.at<double>(2, 0) = zmin;
        Bp.at<double>(0, 1) = xmax; Bp.at<double>(1, 1) = ymin; Bp.at<double>(2, 1) = zmin;
        Bp.at<double>(0, 2) = xmax; Bp.at<double>(1, 2) = ymax; Bp.at<double>(2, 2) = zmin;
        Bp.at<double>(0, 3) = xmin; Bp.at<double>(1, 3) = ymax; Bp.at<double>(2, 3) = zmin;
        Bp.at<double>(0, 4) = xmin; Bp.at<double>(1, 4) = ymin; Bp.at<double>(2, 4) = zmax;
        Bp.at<double>(0, 5) = xmax; Bp.at<double>(1, 5) = ymin; Bp.at<double>(2, 5) = zmax;
        Bp.at<double>(0, 6) = xmax; Bp.at<double>(1, 6) = ymax; Bp.at<double>(2, 6) = zmax;
        Bp.at<double>(0, 7) = xmin; Bp.at<double>(1, 7) = ymax; Bp.at<double>(2, 7) = zmax;
        Mat Rpp; transpose(Rp, Rpp);
        Bp = Rpp*Bp;
        transpose(Bp, Bp);
        
        Box3d* Bk = new Box3d(Bp);
        B.push_back(Bk);
    }

    this->objectHulls = B;
    
    return B;
}
Example #10
void WaldBoost::fit(Mat& data_pos, Mat& data_neg)
{
    // data_pos: F x N_pos
    // data_neg: F x N_neg
    // every feature corresponds to row
    // every sample corresponds to column
    assert(data_pos.rows >= weak_count_);
    assert(data_pos.rows == data_neg.rows);

    std::vector<bool> feature_ignore;
    for (int i = 0; i < data_pos.rows; ++i) {
        feature_ignore.push_back(false);
    }

    Mat1f pos_weights(1, data_pos.cols, 1.0f / (2 * data_pos.cols));
    Mat1f neg_weights(1, data_neg.cols, 1.0f / (2 * data_neg.cols));
    Mat1f pos_trace(1, data_pos.cols, 0.0f);
    Mat1f neg_trace(1, data_neg.cols, 0.0f);

    bool quantize = false;
    if (data_pos.type() != CV_8U) {
        std::cerr << "quantize" << std::endl;
        quantize = true;
    }

    Mat1f data_min, data_step;
    int n_bins = 256;
    if (quantize) {
        compute_min_step(data_pos, data_neg, n_bins, data_min, data_step);
        quantize_data(data_pos, data_min, data_step);
        quantize_data(data_neg, data_min, data_step);
    }

    std::cerr << "pos=" << data_pos.cols << " neg=" << data_neg.cols << std::endl;
    for (int i = 0; i < weak_count_; ++i) {
        // Train weak learner with lowest error using weights
        double min_err = DBL_MAX;
        int min_feature_ind = -1;
        int min_polarity = 0;
        int threshold_q = 0;
        float min_threshold = 0;
//#pragma omp parallel for
        for (int feat_i = 0; feat_i < data_pos.rows; ++feat_i) {
            if (feature_ignore[feat_i])
                continue;

            // Construct cdf
            Mat1f pos_cdf(1, n_bins), neg_cdf(1, n_bins);
            compute_cdf(data_pos.row(feat_i), pos_weights, pos_cdf);
            compute_cdf(data_neg.row(feat_i), neg_weights, neg_cdf);

            float neg_total = (float)sum(neg_weights)[0];
            Mat1f err_direct = pos_cdf + neg_total - neg_cdf;
            Mat1f err_backward = 1.0f - err_direct;

            int idx1[2], idx2[2];
            double err1, err2;
            minMaxIdx(err_direct, &err1, NULL, idx1);
            minMaxIdx(err_backward, &err2, NULL, idx2);
//#pragma omp critical
            {
            if (min(err1, err2) < min_err) {
                if (err1 < err2) {
                    min_err = err1;
                    min_polarity = +1;
                    threshold_q = idx1[1];
                } else {
                    min_err = err2;
                    min_polarity = -1;
                    threshold_q = idx2[1];
                }
                min_feature_ind = feat_i;
                if (quantize) {
                    min_threshold = data_min(feat_i, 0) + data_step(feat_i, 0) *
                        (threshold_q + .5f);
                } else {
                    min_threshold = threshold_q + .5f;
                }
            }
            }
        }


        float alpha = .5f * (float)log((1 - min_err) / min_err);
        alphas_.push_back(alpha);
        feature_indices_.push_back(min_feature_ind);
        thresholds_.push_back(min_threshold);
        polarities_.push_back(min_polarity);
        feature_ignore[min_feature_ind] = true;

        double loss = 0;
        // Update positive weights
        for (int j = 0; j < data_pos.cols; ++j) {
            int val = data_pos.at<unsigned char>(min_feature_ind, j);
            int label = min_polarity * (val - threshold_q) >= 0 ? +1 : -1;
            pos_weights(0, j) *= exp(-alpha * label);
            pos_trace(0, j) += alpha * label;
            loss += exp(-pos_trace(0, j)) / (2.0f * data_pos.cols);
        }

        // Update negative weights
        for (int j = 0; j < data_neg.cols; ++j) {
            int val = data_neg.at<unsigned char>(min_feature_ind, j);
            int label = min_polarity * (val - threshold_q) >= 0 ? +1 : -1;
            neg_weights(0, j) *= exp(alpha * label);
            neg_trace(0, j) += alpha * label;
            loss += exp(+neg_trace(0, j)) / (2.0f * data_neg.cols);
        }
        double cascade_threshold = -1;
        minMaxIdx(pos_trace, &cascade_threshold); // lowest positive trace, so every remaining positive sample still passes the cascade
        cascade_thresholds_.push_back((float)cascade_threshold);

        std::cerr << "i=" << std::setw(4) << i;
        std::cerr << " feat=" << std::setw(5) << min_feature_ind;
        std::cerr << " thr=" << std::setw(3) << threshold_q;
        std::cerr << " casthr=" << std::fixed << std::setprecision(3)
             << cascade_threshold;
        std::cerr <<  " alpha=" << std::fixed << std::setprecision(3)
             << alpha << " err=" << std::fixed << std::setprecision(3) << min_err
             << " loss=" << std::scientific << loss << std::endl;

        //int pos = 0;
        //for (int j = 0; j < data_pos.cols; ++j) {
        //    if (pos_trace(0, j) > cascade_threshold - 0.5) {
        //        pos_trace(0, pos) = pos_trace(0, j);
        //        data_pos.col(j).copyTo(data_pos.col(pos));
        //        pos_weights(0, pos) = pos_weights(0, j);
        //        pos += 1;
        //    }
        //}
        //std::cerr << "pos " << data_pos.cols << "/" << pos << std::endl;
        //pos_trace = pos_trace.colRange(0, pos);
        //data_pos = data_pos.colRange(0, pos);
        //pos_weights = pos_weights.colRange(0, pos);

        int pos = 0;
        for (int j = 0; j < data_neg.cols; ++j) {
            if (neg_trace(0, j) > cascade_threshold - 0.5) {
                neg_trace(0, pos) = neg_trace(0, j);
                data_neg.col(j).copyTo(data_neg.col(pos));
                neg_weights(0, pos) = neg_weights(0, j);
                pos += 1;
            }
        }
        std::cerr << "neg " << data_neg.cols << "/" << pos << std::endl;
        neg_trace = neg_trace.colRange(0, pos);
        data_neg = data_neg.colRange(0, pos);
        neg_weights = neg_weights.colRange(0, pos);


        if (loss < 1e-50 || min_err > 0.5) {
            std::cerr << "Stopping early" << std::endl;
            weak_count_ = i + 1;
            break;
        }

        // Normalize weights
        double z = (sum(pos_weights) + sum(neg_weights))[0];
        pos_weights /= z;
        neg_weights /= z;
    }
}