cv::vector< double > getMomentMatchBasedProbs( const cv::vector< cv::vector< cv::Point > > &contours )
    {
        cv::vector< double > probs( contours.size() );

        for( int i = 0; i < contours.size(); i++ )
        {
            probs[i] =  1.0 - fmin( matchShapes( contours[i], matchContour_, CV_CONTOURS_MATCH_I2, 0.0 ) / matchThreshold_, 1.0 );
        }
        return( probs );
    }
    cv::vector< double > getSurfMatchBasedProbs( const cv::vector< cv::vector< cv::Point > > &contours, cv_bridge::CvImagePtr cvPtr )
    {
        cv::vector< double > probs( contours.size() );

        for( int i = 0; i < contours.size(); i++ )
        {
            probs[i] = getSingleSurfProb( contours[i], cvPtr, i );
        }
        return( probs );
    }
Example #3
void RemoveNoise::removeOutlier(cv::vector<cv::Point2f>& start,
                                cv::vector<cv::Point2f>& end) const
{
    float averageNorm = 0.0f;
    
    for(auto startIter=start.begin(),endIter=end.begin();
        startIter!=start.end(); startIter++,endIter++)
    {
        averageNorm += cv::norm(*startIter - *endIter);
    }
    averageNorm /= start.size();
    
    for(auto startIter=start.begin(), endIter=end.begin();
        startIter!=start.end(); /* look at the end of for */)
    {
        if(cv::norm(*startIter - *endIter) > threshNorm * averageNorm){
            startIter = start.erase(startIter);
            endIter = end.erase(endIter);
            
            continue;
        }
        
        startIter++, endIter++;
    }
}
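The filter above erases from both vectors while iterating, which works but is easy to get wrong when the two sequences must stay aligned. A minimal alternative sketch, assuming the same threshNorm semantics; the standalone function name and signature below are illustrative, not part of RemoveNoise:

// Rebuild both vectors in lock-step instead of erasing while iterating,
// which keeps each start/end pair aligned by construction.
#include <opencv2/core/core.hpp>
#include <vector>

static void removeOutlierByRebuild(std::vector<cv::Point2f>& start,
                                   std::vector<cv::Point2f>& end,
                                   float threshNorm)
{
    if (start.empty()) return;

    float averageNorm = 0.0f;
    for (size_t i = 0; i < start.size(); ++i)
        averageNorm += cv::norm(start[i] - end[i]);
    averageNorm /= start.size();

    std::vector<cv::Point2f> keptStart, keptEnd;
    for (size_t i = 0; i < start.size(); ++i) {
        if (cv::norm(start[i] - end[i]) <= threshNorm * averageNorm) {
            keptStart.push_back(start[i]);
            keptEnd.push_back(end[i]);
        }
    }
    start.swap(keptStart);
    end.swap(keptEnd);
}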
bool VisualFeatureExtraction::cutFeatures(cv::vector<cv::KeyPoint> &kpts,
		cv::Mat &features, unsigned short maxFeats) const {

	// store hash values in a map
	std::map<size_t, unsigned int> keyp_hashes;
	cv::vector<cv::KeyPoint>::iterator itKeyp;

	cv::Mat sorted_features;

	unsigned int iLine = 0;
	for (itKeyp = kpts.begin(); itKeyp < kpts.end(); itKeyp++, iLine++)
		keyp_hashes[(*itKeyp).hash()] = iLine;

	// sort values according to the response
	std::sort(kpts.begin(), kpts.end(), greater_than_response());
	// create a new descriptor matrix with the sorted keypoints
	sorted_features.create(0, features.cols, features.type());
	sorted_features.reserve(features.rows);
	for (itKeyp = kpts.begin(); itKeyp < kpts.end(); itKeyp++)
		sorted_features.push_back(features.row(keyp_hashes[(*itKeyp).hash()]));

	features = sorted_features.clone();

	// select the first maxFeats features
	if (kpts.size() > maxFeats) {
		vector<KeyPoint> cutKpts(kpts.begin(), kpts.begin() + maxFeats);
		kpts = cutKpts;

		features = features.rowRange(0, maxFeats).clone();
	}

	return 0;

}
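The reordering above keys descriptor rows by KeyPoint::hash(), which assumes no two keypoints hash to the same value. A hedged C++11 sketch of an index-sort variant that avoids the map entirely, assuming kpts[i] corresponds to features.row(i) on entry; the helper name is illustrative:

// Sort indices by keypoint response and gather the corresponding rows,
// then keep only the strongest maxFeats entries.
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <algorithm>
#include <numeric>
#include <vector>

static void keepStrongestFeatures(std::vector<cv::KeyPoint>& kpts,
                                  cv::Mat& features, unsigned short maxFeats)
{
    std::vector<int> order(kpts.size());
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(), [&](int a, int b) {
        return kpts[a].response > kpts[b].response;
    });

    const size_t keep = std::min<size_t>(maxFeats, kpts.size());
    std::vector<cv::KeyPoint> sortedKpts;
    cv::Mat sortedFeats(0, features.cols, features.type());
    for (size_t i = 0; i < keep; ++i) {
        sortedKpts.push_back(kpts[order[i]]);
        sortedFeats.push_back(features.row(order[i]));
    }
    kpts.swap(sortedKpts);
    features = sortedFeats.clone();
}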
Example #5
void setWorldPoints(cv::vector<cv::vector<cv::Point3f> > &worldPoints, 
		    const cv::Size patternSize,
		    const int &fileNum){
  worldPoints.clear();
  worldPoints.resize(fileNum);
  for(int i = 0; i < fileNum; i++ )
    {
      for(int j = 0; j < patternSize.height; j++ )
	for(int k = 0; k < patternSize.width; k++ )
	  worldPoints[i].push_back(cv::Point3f(k*g_squareSize, j*g_squareSize, 0));
    }
}
int GrayCodes::grayToDec(cv::vector<bool> gray)//convert a gray code sequence to a decimal number
{
    int dec = 0;
    bool tmp = gray[0];
    if(tmp)
        dec += (int) pow((float)2, int(gray.size() - 1));
    for(int i = 1; i < gray.size(); i++){
        tmp=Utilities::XOR(tmp,gray[i]);
        if(tmp)
            dec+= (int) pow((float)2,int (gray.size() - i - 1) );
    }
    return dec;
}
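Because each output bit is the XOR of the Gray-code prefix up to that position, the same conversion can be written with the classic shift-XOR trick once the bits are packed into an integer. A small sketch, assuming the code fits in an unsigned int and is stored MSB-first as in the vector<bool> version above:

#include <vector>

static unsigned grayToDecBits(const std::vector<bool>& gray)
{
    unsigned g = 0;
    for (size_t i = 0; i < gray.size(); ++i)   // pack MSB-first bits into an integer
        g = (g << 1) | (gray[i] ? 1u : 0u);

    unsigned b = g;                            // b ^= b >> k folds in the prefix XORs
    for (unsigned shift = 1; shift < sizeof(unsigned) * 8; shift <<= 1)
        b ^= b >> shift;
    return b;
}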
Example #7
cv::vector<cv::DMatch> Matching::getSymetryMatches(
        const cv::vector<cv::DMatch> &matches1, const cv::vector<cv::DMatch> &matches2){

    cv::vector<cv::DMatch> symmetryMatches;
    for(int i = 0; i < matches1.size(); i++){
        for (int j = 0; j < matches2.size(); j++){
            if(matches1[i].queryIdx == matches2[j].trainIdx && matches1[i].trainIdx == matches2[j].queryIdx ){
                symmetryMatches.push_back(cv::DMatch(matches1[i].queryIdx, matches1[i].trainIdx, matches1[i].distance));
                break;
            }
        }
    }
    return symmetryMatches;
}
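The nested loops above cost O(n·m) comparisons. A hedged sketch of the same symmetry test in roughly O((n+m)·log m), indexing matches2 by its trainIdx first; the function name is illustrative:

#include <opencv2/features2d/features2d.hpp>
#include <map>
#include <vector>

static std::vector<cv::DMatch> symmetryMatchesFast(
        const std::vector<cv::DMatch>& matches1,
        const std::vector<cv::DMatch>& matches2)
{
    std::map<int, int> reverse;                 // trainIdx of matches2 -> its queryIdx
    for (size_t j = 0; j < matches2.size(); ++j)
        reverse[matches2[j].trainIdx] = matches2[j].queryIdx;

    std::vector<cv::DMatch> out;
    for (size_t i = 0; i < matches1.size(); ++i) {
        std::map<int, int>::const_iterator it = reverse.find(matches1[i].queryIdx);
        if (it != reverse.end() && it->second == matches1[i].trainIdx)
            out.push_back(matches1[i]);         // symmetric: kept with its original distance
    }
    return out;
}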
Example #8
void AGPathDetection::detectPaths(cv::vector<cv::vector<AGImage>> &imagesMatrix)
{
    this->testingMode = false;
//    this->testSelectedImage(imagesMatrix[1][0]);
//    this->testSelectedImage(imagesMatrix[1][0]);
    for (int x = 0; x < imagesMatrix.size(); x++) {
        for (int y = 0; y < imagesMatrix.front().size(); y++) {
            Mat skeleton;
            this->prepareForPathDetecting(imagesMatrix[x][y].image, skeleton);
            this->searchForPathsInImageUsingSkeleton(imagesMatrix[x][y], skeleton);
            this->testDetectedPathsInImage(imagesMatrix[x][y]);
        }
    }
}
Example #9
bool RemoveNoise::isEnoughHalfVector(cv::vector<cv::Point2f>& start) const
{
    int xCenter = frameSize.width / 2;
    int count = 0;
    
    for(auto startIter=start.begin(); startIter!=start.end(); startIter++){
        if((left? startIter->x <= xCenter: xCenter < startIter->x)){
            count++;
        }
    }
    
    if(count < threshNum / 2) return false;
    
    return true;
}
	// Estimate the perspective projection matrix
	void calcProjectionMatrix(cv::vector<cv::Point3d>& op, cv::vector<cv::Point2d>& ip, cv::Mat& dst)
	{
		cv::Mat A;
		A.create(cv::Size(12, op.size()*2), CV_64FC1);

		for (int i = 0, j = 0; i < op.size()*2; i+=2, ++j)
		{
			A.at<double>(i, 0) = 0.0;
			A.at<double>(i, 1) = 0.0;
			A.at<double>(i, 2) = 0.0;
			A.at<double>(i, 3) = 0.0;

			A.at<double>(i, 4) = -op[j].x;
			A.at<double>(i, 5) = -op[j].y;
			A.at<double>(i, 6) = -op[j].z;
			A.at<double>(i, 7) = -1.0;

			A.at<double>(i, 8) = ip[j].y*op[j].x;
			A.at<double>(i, 9) = ip[j].y*op[j].y;
			A.at<double>(i, 10) = ip[j].y*op[j].z;
			A.at<double>(i, 11) = ip[j].y;

			A.at<double>(i+1, 0) = op[j].x;
			A.at<double>(i+1, 1) = op[j].y;
			A.at<double>(i+1, 2) = op[j].z;
			A.at<double>(i+1, 3) = 1.0;

			A.at<double>(i+1, 4) = 0.0;
			A.at<double>(i+1, 5) = 0.0;
			A.at<double>(i+1, 6) = 0.0;
			A.at<double>(i+1, 7) = 0.0;

			A.at<double>(i+1, 8) = -ip[j].x*op[j].x;
			A.at<double>(i+1, 9) = -ip[j].x*op[j].y;
			A.at<double>(i+1, 10) = -ip[j].x*op[j].z;
			A.at<double>(i+1, 11) = -ip[j].x;
		}

		cv::Mat pvect;
		cv::SVD::solveZ(A, pvect);

		cv::Mat pm(3, 4, CV_64FC1);
		for (int i = 0; i < 12; i++)
		{
			pm.at<double>(i/4, i%4) = pvect.at<double>( i );
		}
		dst = pm;
	}
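For reference, the two rows assembled per correspondence in the loop above are the standard DLT constraints, written here in the code's convention (homogeneous scale of the image point equal to 1). For a world point $\tilde{X}_j = (X_j, Y_j, Z_j, 1)^T$ with image point $(u_j, v_j)$ the loop appends

$$\begin{pmatrix} \mathbf{0}^T & -\tilde{X}_j^T & v_j\,\tilde{X}_j^T \\ \tilde{X}_j^T & \mathbf{0}^T & -u_j\,\tilde{X}_j^T \end{pmatrix}\mathbf{p} = \mathbf{0},$$

where $\mathbf{p}\in\mathbb{R}^{12}$ stacks the rows of the $3\times 4$ projection matrix $P$. cv::SVD::solveZ returns the unit vector minimizing $\|A\mathbf{p}\|$, i.e. the right singular vector of $A$ for the smallest singular value, which the final loop reshapes into $P$.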
Example #11
void drawDetections(const cv::vector<cv::Point2f>& detections, const cv::Scalar& color, cv::Mat image)
{
    for (size_t i = 0; i < detections.size(); ++i)
    {
        circle(image, detections[i], 3, color, 1, 8, 0);
    }
}
Example #12
/**
 * Another function to validate ellipses, based on an error measure between the
 * points of a contour and their corresponding positions on the fitted ellipse.
 */
bool MyEllipses::errorMeasureEllipse(cv::RotatedRect anEllipse, cv::vector<cv::Point> setOfPoints){

	/* Equation of an ellipse
 	 (x-h)^2/a^2 + (y-k)^2/b^2 = 1
	where,
	(h,k) are the coordinates of the center of the ellipse,
	a is the semi-axis length along x (half the width),
	b is the semi-axis length along y (half the height)
	 */
	float h = anEllipse.center.x;
	float k = anEllipse.center.y;

	float a = anEllipse.size.width / 2;
	float b = anEllipse.size.height / 2;
	float diff = 0;

	int size = setOfPoints.size();

	for( int i = 0; i < size; i++){
		float xx = (float) (setOfPoints[i].x) - h;
		float yy = (float) (setOfPoints[i].y) - k;

		float common = a*b/(float) (sqrt((double) ((b*xx)*(b*xx) + (a*yy)*(a*yy))));
		float xIntersection = xx*common;
		float yIntersection = yy*common;

		//distance = sqrt((xx - xIntersection)^2 + (yy - yIntersection)^2)
		float currentDiff = (float) sqrt((xx-xIntersection)*(xx-xIntersection) + (yy-yIntersection)*(yy-yIntersection));
		diff = diff + currentDiff;
	}
	diff = diff/size;
	return diff < 100;
}
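The factor named common above is the radial scale that projects each centered point onto the ellipse, so the per-point error is the distance to that radial projection (an approximation of the true point-to-ellipse distance). For a centered point $(x', y') = (x - h,\; y - k)$, the ray from the ellipse center through the point meets the ellipse $\left(\tfrac{x}{a}\right)^2 + \left(\tfrac{y}{b}\right)^2 = 1$ at $t\,(x', y')$ with

$$ t = \frac{ab}{\sqrt{b^2 x'^2 + a^2 y'^2}}, $$

so each term added to diff equals $\sqrt{(x' - t x')^2 + (y' - t y')^2} = |1 - t|\,\sqrt{x'^2 + y'^2}$, and the contour is accepted when the mean over its points is below 100.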
Example #13
bool RemoveNoise::isEnoughAllVector(cv::vector<cv::Point2f>& start) const
{
    int count = (int) start.size();
    
    if(count < threshNum) return false;
    
    return true;
}
Example #14
//Remove the pink pocket candidate that lies between the two green points from the vector.
void PointLocator::removePinkCandidate(cv::vector<cv::KeyPoint> &pinkKeyPoints, cv::KeyPoint firstPocket, cv::KeyPoint secondPocket){
	//First check that there are actually pink pocket points
	if (!pinkKeyPoints.empty()){
		float distance = -1;
		int min = 0;
		cv::KeyPoint middlePoint;
		middlePoint.pt.x = (firstPocket.pt.x + secondPocket.pt.x) / 2;
		middlePoint.pt.y = (firstPocket.pt.y + secondPocket.pt.y) / 2;
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = distBetweenKeyPoints(pinkKeyPoints[i], middlePoint);
			if ((distance + 1) < epsilon || newDistance < distance){
				distance = newDistance;
				min = i;
			}
		}
		pinkKeyPoints.erase(pinkKeyPoints.begin() + min, pinkKeyPoints.begin() + min + 1);
	}
}
Example #15
  /* function lineAverage */
  void lineAverage(cv::vector<cv::Vec2f> lines, cv::Mat& src)
  {
	  float rho = 0, theta = 0;
	  for( size_t i = 0; i < lines.size(); i++ )
	  {
		  rho += lines[i][0];
		  theta += lines[i][1];
	  }
	  rho /= lines.size();
	  theta /= lines.size();

	  cv::Point pt1, pt2;
	  double a = cos(theta), b = sin(theta);
	  double x0 = a*rho, y0 = b*rho;
	  pt1.x = cvRound(x0 + 1000*(-b));
	  pt1.y = cvRound(y0 + 1000*(a));
	  pt2.x = cvRound(x0 - 1000*(-b));
	  pt2.y = cvRound(y0 - 1000*(a));
	  //cv::line( src, pt1, pt2, cv::Scalar(80,10,55), 3, CV_AA);

	  float rho1 = 0, rho2 = 0, theta1 = 0, theta2 = 0;
	  float i1 = 0, i2 = 0;
	  for( size_t i = 0; i < lines.size(); i++ )
	  {
		  if (lines[i][1] > theta)
		  {
			  rho1 += lines[i][0];
			  theta1 += lines[i][1];
			  i1++;
		  }
		  else
		  {
			  rho2 += lines[i][0];
			  theta2 += lines[i][1];
			  i2++;
		  }
	  }

	  rho1 /= i1;
	  theta1 /= i1;
	  a = cos(theta1), b = sin(theta1);
	  x0 = a*rho1, y0 = b*rho1;
	  pt1.x = cvRound(x0 + 1000*(-b));
	  pt1.y = cvRound(y0 + 1000*(a));
	  pt2.x = cvRound(x0 - 1000*(-b));
	  pt2.y = cvRound(y0 - 1000*(a));
	  cv::line( src, pt1, pt2, cv::Scalar(0,100,255), 3, CV_AA);

	  rho2 /= i2;
	  theta2 /= i2;
	  a = cos(theta2), b = sin(theta2);
	  x0 = a*rho2, y0 = b*rho2;
	  pt1.x = cvRound(x0 + 1000*(-b));
	  pt1.y = cvRound(y0 + 1000*(a));
	  pt2.x = cvRound(x0 - 1000*(-b));
	  pt2.y = cvRound(y0 - 1000*(a));
	  cv::line( src, pt1, pt2, cv::Scalar(0,100,0), 3, CV_AA);
  }
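For reference, the endpoint construction repeated three times above follows from the Hough normal form: a line $\rho = x\cos\theta + y\sin\theta$ passes through the foot point $(x_0, y_0) = (\rho\cos\theta,\; \rho\sin\theta)$ and has direction $(-\sin\theta,\; \cos\theta)$, so the drawn endpoints are $(x_0, y_0) \pm 1000\,(-\sin\theta,\; \cos\theta)$.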
	// Randomly select 6 points
	void get_six_points(cv::vector<cv::Point2d>& calib_p, cv::vector<cv::Point3d>& calib_P, cv::vector<cv::Point2d>& src_p, cv::vector<cv::Point3d>& src_P)
	{
		int i=0;
		srand(time(NULL));    /* initialize the random number generator */
		cv::Vector<int> exists;
		while(i < 6){
			int maxValue = (int)src_p.size();
			int v = rand() % maxValue;
			bool e2=false;
			for(int s=0; s<i; s++){
				if(exists[s] == v) e2 = true; 
			}
			if(!e2){
				exists.push_back(v);
				calib_P.push_back(src_P[v]);
				calib_p.push_back(src_p[v]);
				i++;
			}
		}
	}
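Drawing distinct random indices can also be done by shuffling an index list, which removes the per-draw duplicate scan and the retry loop. A hedged C++11 sketch, assuming the six-point intent stated in the comment above; the standalone function name is illustrative:

#include <opencv2/core/core.hpp>
#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

static void getSixPoints(std::vector<cv::Point2d>& calib_p,
                         std::vector<cv::Point3d>& calib_P,
                         const std::vector<cv::Point2d>& src_p,
                         const std::vector<cv::Point3d>& src_P)
{
    std::vector<int> idx(src_p.size());
    std::iota(idx.begin(), idx.end(), 0);

    std::random_device rd;
    std::mt19937 gen(rd());
    std::shuffle(idx.begin(), idx.end(), gen);   // distinct indices by construction

    const size_t n = std::min<size_t>(6, idx.size());
    for (size_t i = 0; i < n; ++i) {
        calib_p.push_back(src_p[idx[i]]);
        calib_P.push_back(src_P[idx[i]]);
    }
}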
Example #17
void loadImages(cv::vector<cv::Mat> &rgb, 
		cv::vector<cv::Mat> &depth, 
		const int &fileNum){
  rgb.clear();
  depth.clear();
  
  for(int i = 0; i < fileNum; ++i){
    stringstream rgbfilename, depthfilename;
    rgbfilename << FLAGS_folder <<"/" << FLAGS_color << i << FLAGS_type;
    depthfilename << FLAGS_folder << "/" << FLAGS_depth << i << FLAGS_type;
    
    cout << "loading : " << rgbfilename.str() << " and " << depthfilename.str() << endl;

    // load RGB image
    cv::Mat tempRGB = cv::imread(rgbfilename.str(), 0);
    rgb.push_back(tempRGB);


    // load depth image
    cv::Mat tempDepth = cv::imread(depthfilename.str(), CV_LOAD_IMAGE_ANYDEPTH);
    tempDepth.convertTo(tempDepth, CV_8U, 255.0/1000.0);
    // cv::Mat maxDist = cv::Mat::ones(tempDepth.rows, tempDepth.cols, CV_8U) * MAX_DEPTH;
    // cv::Mat minDist = cv::Mat::ones(tempDepth.rows, tempDepth.cols, CV_8U) * MIN_DEPTH;
    // cv::min(tempDepth, maxDist, tempDepth);
    // tempDepth -= minDist;
    cv::resize(tempDepth, tempDepth, cv::Size(), 2.0,2.0);
    cv::Mat roiTempDepth;

    cv::resize(tempDepth(cv::Rect(40, 43,498,498 / 4 * 3)), roiTempDepth, cv::Size(640, 480));

    depth.push_back(roiTempDepth);

    std::cout << "loaded" << std::endl;

    cv::imshow("rgb",rgb[i]);
    cv::imshow("depth",depth[i]);
    cv::waitKey(100);
  }

}
Example #18
/**
 * A function that takes a list of ellipses with their respective quality and returns the best one.
 */
cv::RotatedRect MyEllipses::getBestEllipse(cv::vector<cv::RotatedRect> ellipses, cv::vector<double> qualityOfEllipses){
	int size = ellipses.size();
	std::cout << size;
	double maxQuality = 0;
	int index = 0;

	for(int i = 0; i < size; i++){
		if( qualityOfEllipses[i]>maxQuality ){
			maxQuality = qualityOfEllipses[i];
			index = i;
		}
	}
	return ellipses[index];
}
Example #19
/*
 * Takes the vector of contours detected in an image and determines whether the points of a contour are clustered around a line endpoint, which makes the contour invalid.
 * Contours deemed invalid are not included in the returned vector of valid contours.
 * 
 *@param contours - a vector containing all the detected contours
 *@param lines - a vector containing all detected straight lines
 * 
 *@return - a vector of valid contours
*/
cv::vector< cv::vector<cv::Point> > ImageProcessor::removeRedundantContours(cv::vector< cv::vector<cv::Point> > & contours, cv::vector<cv::Vec4i> lines){
	
	cv::vector< cv::vector<cv::Point> > valid_contours; //a vector to contain all contours that are determined to be valid

	for(int i = 0; i < (int)contours.size(); i++){

		float invalid_point_count = 0.0f; //count of points in a contour that are clustering around an endpoint
		cv::vector<cv::Point> contour_vec = contours[i];
		for(int k = 0; k < (int)contour_vec.size(); k++){

			//compute the distance between each point in the contour and the endpoints of each straight line
			cv::Point point = contour_vec[k];
			for(int j = 0; j < (int)lines.size(); j++){
				//distances of contour point from each endpoint
				double dist1 = distance(point, cv::Point(lines[j][0], lines[j][1]));
				double dist2 = distance(point, cv::Point(lines[j][2], lines[j][3]));
				//distance between both endpoints of line[j]
				double endpoint_dist = distance(cv::Point(lines[j][0], lines[j][1]), cv::Point(lines[j][2], lines[j][3]));
				
				double contour_inLine_dist = dist1 + dist2;

				//if the contour point lies within MAX_DIST of either endpoint, or nearly on the segment itself, count it as an invalid point
				if(dist1 < MAX_DIST || dist2 < MAX_DIST || (contour_inLine_dist - endpoint_dist) < 1.0 ){
					invalid_point_count++;
					break;
				}
			}
		}
		
		//compute the fraction of invalid points in the contour; the contour is considered valid and pushed
		//onto valid_contours only if fewer than 10% of its points are invalid (PERCENTAGE).
		float invalid_percentage = invalid_point_count / (float) contour_vec.size();
		if(invalid_percentage < PERCENTAGE){
			valid_contours.push_back(contours[i]);
		}				
	}

	return valid_contours;
}
    cv::vector< double > getAreaBasedProbs( const cv::vector< cv::vector< cv::Point > > &contours )
    {
        int largestContour = -1;
        double maxArea = 0.0;
        cv::vector< double > probs( contours.size() );

        for( int i = 0; i < contours.size(); i++ )
        {
            probs[i] = cv::contourArea( contours[i] );
            if( maxArea < probs[i] )
            {
                maxArea = probs[i];
                largestContour = i;
            }
        }

        // guard against an empty contour list or all-zero areas before normalizing
        if( largestContour < 0 )
        {
            return( probs );
        }

        for( int i = 0; i < contours.size(); i++ )
        {
            probs[i] /= probs[largestContour];
        }

        return( probs );
    }
Example #21
void SiftGPUWrapper::detect(const cv::Mat& image, cv::vector<cv::KeyPoint>& keypoints, std::vector<float>& descriptors, const Mat& mask) const {
    if (error) {
        keypoints.clear();
        ROS_FATAL("SiftGPU cannot be used. Detection of keypoints failed");
        return;
    }

    //get image
    cvMatToSiftGPU(image, data);

    int num_features = 0;
    SiftGPU::SiftKeypoint* keys = 0;

    ROS_DEBUG("SIFTGPU: cols: %d, rows: %d", image.cols, image.rows);
    if (siftgpu->RunSIFT(image.cols, image.rows, data, GL_LUMINANCE, GL_UNSIGNED_BYTE)) {
        num_features = siftgpu->GetFeatureNum();
        ROS_INFO("Number of features found: %i", num_features);
        keys = new SiftGPU::SiftKeypoint[num_features];
        descriptors.resize(128 * num_features);
        //descriptors = new float[128 * num_features];
        siftgpu->GetFeatureVector(&keys[0], &descriptors[0]);
    } else {
        ROS_WARN("SIFTGPU->RunSIFT() failed!");
    }

    //copy to opencv structure
    keypoints.clear();
    for (int i = 0; i < num_features; ++i) {
        KeyPoint key(keys[i].x, keys[i].y, keys[i].s, keys[i].o);
        keypoints.push_back(key);
    }
    delete[] keys;    // release the keypoint buffer allocated above

    //	FILE *fp = fopen("bla.pgm", "w");
    //	WritePGM(fp, data, image.cols, image.rows);
    //	fclose(fp);

}
void loadImages(cv::vector<cv::Mat> &lefts, cv::vector<cv::Mat> &rights,
                const int &fileNum) {
    cv::namedWindow("Left", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
    cv::namedWindow("Right", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    lefts.clear();
    rights.clear();

    for (int i = 0; i < fileNum; i++) {
        std::stringstream lfile, rfile;
        lfile << FLAGS_dir << "/left_" << i << FLAGS_suffix;
        rfile << FLAGS_dir << "/right_" << i << FLAGS_suffix;
        std::cout << "load: " << lfile.str() << ", " << rfile.str() << std::endl;

        cv::Mat left = cv::imread(lfile.str(), 0);
        cv::Mat right = cv::imread(rfile.str(), 0);
        lefts.push_back(left);
        rights.push_back(right);

        cv::imshow("Left", left);
        cv::imshow("Right", right);
        cv::waitKey(100);
    }
}
Example #23
int findChessboard(cv::vector<cv::Mat> &rgb, cv::vector<cv::Mat> &depth, 
		   cv::vector<cv::vector<cv::vector<cv::Point2f> > > &imagePoints,
		   const cv::Size patternSize,
		   const int &fileNum){
    for(int i = 0; i < rgb.size(); ++i){
    cout << i << endl;
    
    if( cv::findChessboardCorners( rgb[i], 
				   patternSize, 
				   imagePoints[0][i],
				   CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE 
				   ) && 
	cv::findChessboardCorners( depth[i], 
				   patternSize, 
				   imagePoints[1][i] ,
				   CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE 
				   ) ) {
    

      std::cout << " ... All corners found." << std::endl;

      cv::cornerSubPix(rgb[i], imagePoints[0][i], cv::Size(11,11), cv::Size(-1,-1),
		       cv::TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
					30, 0.01));

      cv::cornerSubPix(depth[i], imagePoints[1][i], cv::Size(11,11), cv::Size(-1,-1),
   		       cv::TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
   					30, 0.01));
      // draw the detected corners
      cv::drawChessboardCorners( rgb[i], patternSize, ( cv::Mat )( imagePoints[0][i] ), true );
      cv::drawChessboardCorners( depth[i], patternSize, ( cv::Mat )( imagePoints[1][i] ), true );
      cv::imshow( "rgb", rgb[i] );
      cv::imshow("depth", depth[i]);
      cv::waitKey( 100 );
    } else {
      std::cout << " ... at least 1 corner not found." << std::endl;
      rgb.erase(rgb.begin() + i);
      depth.erase(depth.begin() + i);
      imagePoints[0].erase(imagePoints[0].begin() + i);
      imagePoints[1].erase(imagePoints[1].begin() + i);
      cout << rgb.size() << endl;
      //      fileNum--;
      i--;
      cv::waitKey( 100 );
    }
  }
    return rgb.size();
}
Example #24
/*
 * Convert consecutive contour points into line segments and store each segment as a Vec4i
 *
 *@param contours - vector containing contour information
 * 
 *@return the contour information as a vector of Vec4i segments
*/
cv::vector<cv::Vec4i> ImageProcessor::pointsToVec4i(const cv::vector< cv::vector<cv::Point> > & contours){

	cv::vector<cv::Vec4i> vector;

	for(int i = 0; i < (int)contours.size(); i++){
		cv::vector<cv::Point> contour = contours[i];
		for(int j = 1; j < (int)contour.size();j++){
			cv::Point p1, p2;
			cv::Vec4i vec;
			p1 = contour[j - 1];
			p2 = contour[j];
			vec[0] = p1.x;
			vec[1] = p1.y;
			vec[2] = p2.x;
			vec[3] = p2.y;
			vector.push_back(vec);
		}
	}
	
	return vector;
}
	//Compute the error between the original image feature points and the recomputed (reprojected) feature points
	double inspection_error_value(cv::Mat& cameraMat, cv::vector<cv::Point3d>& P, cv::vector<cv::Point2d>& groundTruth)
	{
		if(cameraMat.cols != 4 || cameraMat.rows != 3){
			return 0.0;
		}
		cv::vector<cv::Point2d> p;
		for(int i=0; i<(int)P.size(); i++){
			double x = (cameraMat.at<double>(0,0)*P[i].x + cameraMat.at<double>(0,1)*P[i].y + cameraMat.at<double>(0,2)*P[i].z + cameraMat.at<double>(0,3))
						/ (cameraMat.at<double>(2,0)*P[i].x + cameraMat.at<double>(2,1)*P[i].y + cameraMat.at<double>(2,2)*P[i].z + cameraMat.at<double>(2,3));
			double y = (cameraMat.at<double>(1,0)*P[i].x + cameraMat.at<double>(1,1)*P[i].y + cameraMat.at<double>(1,2)*P[i].z + cameraMat.at<double>(1,3))
						/ (cameraMat.at<double>(2,0)*P[i].x + cameraMat.at<double>(2,1)*P[i].y + cameraMat.at<double>(2,2)*P[i].z + cameraMat.at<double>(2,3));
			p.push_back(cv::Point2d(x,y));
		}

		double sum = 0.0;
		for(int i=0; i<(int)p.size(); i++){
			double error = pow(pow(groundTruth[i].x-p[i].x,2.0)+pow(groundTruth[i].y-p[i].y,2.0),0.5);
			sum = sum + error;
		}

		return sum/(int)p.size();
	}
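The loop implements the usual mean reprojection error of the pinhole model. With $P = (p_{rc})$ the estimated $3\times 4$ matrix and $\tilde{P}_i = (X_i, Y_i, Z_i, 1)^T$:

$$ u_i = \frac{p_{11}X_i + p_{12}Y_i + p_{13}Z_i + p_{14}}{p_{31}X_i + p_{32}Y_i + p_{33}Z_i + p_{34}}, \qquad
v_i = \frac{p_{21}X_i + p_{22}Y_i + p_{23}Z_i + p_{24}}{p_{31}X_i + p_{32}Y_i + p_{33}Z_i + p_{34}}, $$

and the returned value is the mean Euclidean distance $\frac{1}{N}\sum_i \sqrt{(u_i - u_i^{\mathrm{gt}})^2 + (v_i - v_i^{\mathrm{gt}})^2}$ to the ground-truth detections.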
Example #26
int TrainingSet::getTrainingSet(cv::vector<cv::Mat> &images, cv::vector<int> &labels, cv::vector<QString> &names){
  QStringList directoriesList = getAvailableFaces();

  labels.clear();
  images.clear();
  names.clear();
  for(int i=0; i<directoriesList.size(); i++){
    QString person = directoriesList.at(i);
    QDir faceDirectory;
    faceDirectory.setPath(mainDirectory.path() + "/" + person);
    QStringList photosList = faceDirectory.entryList(QDir::Files);
    foreach(QString photo, photosList){
      images.push_back(cv::imread(faceDirectory.path().toStdString() + "/" + photo.toStdString(), CV_LOAD_IMAGE_GRAYSCALE));
      labels.push_back(i);
    }
    names.push_back(person);
  }

  return images.size();  // assumed return value: the number of training images loaded
}
int findChessboards(
        cv::vector<cv::Mat> &lefts, cv::vector<cv::Mat> &rights,
        cv::vector<cv::vector<cv::vector<cv::Point2f>>> &imagePoints,
        const cv::Size patternSize, const int &fileNum) {
    for (size_t i = 0; i < lefts.size(); ++i) {
        if (cv::findChessboardCorners(
                lefts[i], patternSize, imagePoints[0][i],
                CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE) &&
            cv::findChessboardCorners(
                rights[i], patternSize, imagePoints[1][i],
                CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE)) {
            cv::cornerSubPix(
                    lefts[i], imagePoints[0][i], cv::Size(11, 11), cv::Size(-1, -1),
                    cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, 0.01));
            cv::cornerSubPix(
                    rights[i], imagePoints[1][i], cv::Size(11, 11), cv::Size(-1, -1),
                    cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, 0.01));

            cv::drawChessboardCorners(
                    lefts[i], patternSize, (cv::Mat)(imagePoints[0][i]), true);
            cv::drawChessboardCorners(
                    rights[i], patternSize, (cv::Mat)(imagePoints[1][i]), true);

            cv::imshow("Left", lefts[i]);
            cv::imshow("Right", rights[i]);
        } else {
            std::cout << "cannot find all corners" << std::endl;

            lefts.erase(lefts.begin() + i);
            rights.erase(rights.begin() + i);
            imagePoints[0].erase(imagePoints[0].begin() + i);
            imagePoints[1].erase(imagePoints[1].begin() + i);
            i--;
        }

        cv::waitKey(100);
    }

    return lefts.size();
}
Example #28
File: MSFM.cpp  Project: braisCB/WMM
cv::Mat MSFM::MSFMSurfaceO2(cv::Mat& image, cv::vector<cv::Point>& initials, cv::Point2d &h) {
    
    cv::Mat u_surface = MAX_VAL*cv::Mat::ones(image.rows, image.cols, CV_64FC1);
    cv::Mat state = cv::Mat::zeros(image.rows, image.cols, CV_8UC1);
    
    std::multimap<double, cv::Point> trial_set;
    std::map<int, std::multimap<double, cv::Point>::iterator> mapa_trial;
    
    std::multimap<double, cv::Point>::iterator trial_set_it;
    std::map<int, std::multimap<double, cv::Point>::iterator>::iterator mapa_trial_it;
    std::pair<double, cv::Point> pr_trial;
    std::pair<int, std::multimap<double, cv::Point>::iterator> pr_mapa;
    int key, i;
    cv::Point winner, neigh;
    
    // Initialization
    for (i = 0; i < (int) initials.size(); i++) {
        key = initials[i].y + image.rows*initials[i].x;
        if (mapa_trial.find(key) == mapa_trial.end()) {
            distance2M(initials[i]) = 0.0;
            state2M(initials[i]) = P_TRIAL;
            pr_trial = std::pair<double, cv::Point>(0.0, initials[i]);
            trial_set_it = trial_set.insert(pr_trial);
            pr_mapa = std::pair<int, std::multimap<double, cv::Point>::iterator>(key, trial_set_it);
            mapa_trial.insert(pr_mapa);
        }
    }
    
    // LOOP
    while (!trial_set.empty()) {
        
        trial_set_it = trial_set.begin();
        
        key = trial_set_it->second.y + image.rows*trial_set_it->second.x;
        mapa_trial_it = mapa_trial.find(key);
        
        if (mapa_trial_it == mapa_trial.end()) {
            printf("ERROR: bad map alloc");
            exit(-1);
        }
        
        if (mapa_trial_it->second != trial_set_it) {
            printf("ERROR: bad trial/map alloc");
            exit(-1);
        }
        
        winner = trial_set_it->second;
        trial_set.erase(trial_set_it);
        mapa_trial.erase(mapa_trial_it);
                
        state2M(winner) = P_ALIVE;
        
        // UPWIND PROCEDURE
        for (int i=-1; i<2; i+=2) {
            neigh = cv::Point(winner.x + i, winner.y);
            if (contains2M(neigh))
                this->StencilS1O2(image, u_surface, state, trial_set, mapa_trial, neigh, h);
            neigh = cv::Point(winner.x, winner.y + i);
            if (contains2M(neigh))
                this->StencilS1O2(image, u_surface, state, trial_set, mapa_trial, neigh, h);
            neigh = cv::Point(winner.x + i, winner.y + i);
            if (contains2M(neigh))
                this->StencilS2O2(image, u_surface, state, trial_set, mapa_trial, neigh, h);
            neigh = cv::Point(winner.x - i, winner.y + i);
            if (contains2M(neigh))
                this->StencilS2O2(image, u_surface, state, trial_set, mapa_trial, neigh, h);
        }
    }
    
    return u_surface;
}
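The trial set above is a multimap ordered by arrival time, paired with a map from the linearized pixel key (y + rows*x) back to the multimap iterator, so queued pixels can be located and revised in O(log n). A hedged, minimal sketch of just that bookkeeping; the helper names are illustrative and not part of MSFM:

#include <opencv2/core/core.hpp>
#include <map>

typedef std::multimap<double, cv::Point> TrialSet;
typedef std::map<int, TrialSet::iterator> TrialIndex;

static int linearKey(const cv::Point& p, int rows) { return p.y + rows * p.x; }

// Insert a pixel, or lower its tentative value if it is already queued.
static void pushOrDecrease(TrialSet& trial, TrialIndex& index,
                           const cv::Point& p, double value, int rows)
{
    const int key = linearKey(p, rows);
    TrialIndex::iterator it = index.find(key);
    if (it != index.end()) {
        if (it->second->first <= value) return;  // existing entry is already better
        trial.erase(it->second);                 // drop the stale entry
        index.erase(it);
    }
    TrialSet::iterator inserted = trial.insert(std::make_pair(value, p));
    index[key] = inserted;
}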
Example #29
cv::vector<pocket> PointLocator::infer(cv::vector<cv::KeyPoint> orangeKeyPoints, cv::vector<cv::KeyPoint> greenKeyPoints,
										cv::vector<cv::KeyPoint> purpleKeyPoints, cv::vector<cv::KeyPoint> pinkKeyPoints)
{
	//Define vector of pocket points to be passed
	cv::vector<pocket> pockets;

	//There should be a maximum of 2 points per colour. If there is more, reduce.
	//This depends on the quality of test video results.
	//Right now it just takes the first 2 points in vector to prevent crashes.
	//Takes only one point for orange and purple since they are side pockets
	//TODO if needed later.
	if (orangeKeyPoints.size() > 1){
		orangeKeyPoints.erase(orangeKeyPoints.begin() + 1, orangeKeyPoints.end());
	}
	if (greenKeyPoints.size() > 2){
		greenKeyPoints.erase(greenKeyPoints.begin() + 2, greenKeyPoints.end());
	}
	if (purpleKeyPoints.size() > 1){
		purpleKeyPoints.erase(purpleKeyPoints.begin() + 1, purpleKeyPoints.end());
	}
	if (pinkKeyPoints.size() > 3){
		pinkKeyPoints.erase(pinkKeyPoints.begin() + 3, pinkKeyPoints.end());
	}
	//Returns a vector of pocket type
	pockets = labelPockets(orangeKeyPoints, greenKeyPoints, purpleKeyPoints, pinkKeyPoints);

	return pockets;
}
Example #30
cv::vector<pocket> PointLocator::labelPockets(cv::vector<cv::KeyPoint> orangeKeyPoints, cv::vector<cv::KeyPoint> greenKeyPoints,
	cv::vector<cv::KeyPoint> purpleKeyPoints, cv::vector<cv::KeyPoint> pinkKeyPoints){
	//Define vector of pocket points to be passed
	cv::vector<pocket> pockets(4);
	int pocketCount = 0;
	int realPocketCount = 0;
	bool pinkTop = true;
	bool pinkLeft = true;
	bool pinkRight = true;

	if (greenKeyPoints.size() + orangeKeyPoints.size() + pinkKeyPoints.size() + purpleKeyPoints.size() >= 4){
		defPerspective = true;
	}

	//Select green pockets: Case 1: 2 green pockets in view
	if (greenKeyPoints.size() == 2){
		//Step 1: If only green pockets are seen, select destination locations based on their x values.
		if (orangeKeyPoints.size() == 0 && purpleKeyPoints.size() == 0){
			if (greenKeyPoints[0].pt.x < greenKeyPoints[1].pt.x){
				pockets[0].pocketPoints = greenKeyPoints[0];
				pockets[1].pocketPoints = greenKeyPoints[1];
			}
			else{
				pockets[0].pocketPoints = greenKeyPoints[1];
				pockets[1].pocketPoints = greenKeyPoints[0];
			}
		}
		//Step 2: If green end pockets and if both purple and orange side pockets are in view
		//Is there more logic we can use to make sure this is right? Right now it is same as just orange pockets logic.
		else if (orangeKeyPoints.size() > 0 && purpleKeyPoints.size() > 0){
			float distGreen0ToOrange = distBetweenKeyPoints(greenKeyPoints[0], orangeKeyPoints[0]);
			float distGreen1ToOrange = distBetweenKeyPoints(greenKeyPoints[1], orangeKeyPoints[0]);
			if (distGreen0ToOrange > distGreen1ToOrange){
				pockets[0].pocketPoints = greenKeyPoints[0];
				pockets[1].pocketPoints = greenKeyPoints[1];
			}
			else{
				pockets[0].pocketPoints = greenKeyPoints[1];
				pockets[1].pocketPoints = greenKeyPoints[0];
			}
		}
		//Step 3: If green end pockets and if only the orange side pocket is in view
		else if (orangeKeyPoints.size() > 0){
			float distGreen0ToOrange = distBetweenKeyPoints(greenKeyPoints[0], orangeKeyPoints[0]);
			float distGreen1ToOrange = distBetweenKeyPoints(greenKeyPoints[1], orangeKeyPoints[0]);
			if (distGreen0ToOrange > distGreen1ToOrange){
				pockets[0].pocketPoints = greenKeyPoints[0];
				pockets[1].pocketPoints = greenKeyPoints[1];
			}
			else{
				pockets[0].pocketPoints = greenKeyPoints[1];
				pockets[1].pocketPoints = greenKeyPoints[0];
			}
		}
		//Step 4: If green end pockets and if only the purple side pocket is in view
		else if (purpleKeyPoints.size() > 0){
			float distGreen0ToPurple = distBetweenKeyPoints(greenKeyPoints[0], purpleKeyPoints[0]);
			float distGreen1ToPurple = distBetweenKeyPoints(greenKeyPoints[1], purpleKeyPoints[0]);
			if (distGreen0ToPurple < distGreen1ToPurple){
				pockets[0].pocketPoints = greenKeyPoints[0];
				pockets[1].pocketPoints = greenKeyPoints[1];
			}
			else{
				pockets[0].pocketPoints = greenKeyPoints[1];
				pockets[1].pocketPoints = greenKeyPoints[0];
			}
		}

		//Removes the pink keypoint candidate that lies between the green pockets (collinear with them).
		removePinkCandidate(pinkKeyPoints, pockets[0].pocketPoints, pockets[1].pocketPoints);
		pinkTop = false;

		//Puts the pockets destination locations in since top left pocket will always be pockets[0]
		pockets[0].xLocation = xLeft;
		pockets[0].yLocation = yTop;
		pockets[1].xLocation = xRight;
		pockets[1].yLocation = yTop;

		//Updates Pocket count
		pocketCount = 2;
		realPocketCount = 2;
	}

	//Step 5: Select green pockets: Case 2: 1 green pocket in view
	if (greenKeyPoints.size() == 1){
		pockets[0].pocketPoints = greenKeyPoints[0];
		if (orangeKeyPoints.size() > 0 && purpleKeyPoints.size() > 0){
			float distToOrange = distBetweenKeyPoints(greenKeyPoints[0], orangeKeyPoints[0]);
			float distToPurple = distBetweenKeyPoints(greenKeyPoints[0], purpleKeyPoints[0]);
			if (distToOrange < distToPurple){
				pockets[0].xLocation = xRight;
				pockets[0].yLocation = yTop;
			}
			else{
				pockets[0].xLocation = xLeft;
				pockets[0].yLocation = yTop;
			}
		}
		else if (orangeKeyPoints.size() > 0){
			pockets[0].xLocation = xRight;
			pockets[0].yLocation = yTop;
		}
		else if (purpleKeyPoints.size() > 0){
			pockets[0].xLocation = xLeft;
			pockets[0].yLocation = yTop;
		}
		//Updates Pocket count
		pocketCount = 1;
		realPocketCount = 1;
	}

	//Update orange and purple pockets after green pockets are in so we know that green pockets are first in vector.
	if (orangeKeyPoints.size() > 0){
		pockets[pocketCount].pocketPoints = orangeKeyPoints[0];
		pockets[pocketCount].xLocation = xRight;
		pockets[pocketCount].yLocation = yMid;
		pocketCount++;
		realPocketCount++;
	}
	if (purpleKeyPoints.size() > 0){
		pockets[pocketCount].pocketPoints = purpleKeyPoints[0];
		pockets[pocketCount].xLocation = xLeft;
		pockets[pocketCount].yLocation = yMid;
		pocketCount++;
		realPocketCount++;
	}

	//Removes pink candidates that lie between the green and orange pockets and between the green and purple pockets
	if (greenKeyPoints.size() == 2){
		if (orangeKeyPoints.size() > 0){
			removePinkCandidate(pinkKeyPoints, pockets[1].pocketPoints, orangeKeyPoints[0]);
			pinkRight = false;
		}
		if (purpleKeyPoints.size() > 0){
			removePinkCandidate(pinkKeyPoints, pockets[0].pocketPoints, purpleKeyPoints[0]);
			pinkLeft = false;
		}
	}
	else if (greenKeyPoints.size() == 1){
		int removeLocation = 0;
		if (orangeKeyPoints.size() > 0 && purpleKeyPoints.size() > 0){
			float distToOrange = distBetweenKeyPoints(orangeKeyPoints[0], pockets[0].pocketPoints);
			float distToPurple = distBetweenKeyPoints(purpleKeyPoints[0], pockets[0].pocketPoints);
			if (distToOrange > distToPurple){
				removeLocation = 2;
			}
			else{
				removeLocation = 1;
			}
		}
		else if (orangeKeyPoints.size() > 0 && (removeLocation == 0 || removeLocation == 1)){
			removePinkCandidate(pinkKeyPoints, pockets[0].pocketPoints, orangeKeyPoints[0]);
			pinkRight = false;
		}
		if (purpleKeyPoints.size() > 0 && (removeLocation == 0 || removeLocation == 2)){
			removePinkCandidate(pinkKeyPoints, pockets[0].pocketPoints, purpleKeyPoints[0]);
			pinkLeft = false;
		}
	}
	

	//Adds pink pockets to list of pockets based on other pockets identified.
	while (!pinkKeyPoints.empty() && pockets[3].xLocation == NULL && pocketCount < 4){
		//Find the pink marker closest to the first pocket in list.
		//It is structured so this is always the right marker to choose because of elimination of markers from candidate list.
		float distance = -1;
		int min = 0;
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = distBetweenKeyPoints(pinkKeyPoints[i], pockets[0].pocketPoints);
			if ((distance + 1) < epsilon || newDistance < distance){
				distance = newDistance;
				min = i;
			}
		}
		pockets[pocketCount].pocketPoints = pinkKeyPoints[min];

		//
		if (pinkTop){
			pockets[pocketCount].xLocation = xMid;
			pockets[pocketCount].yLocation = yTop;
			pocketCount++;
			pinkTop = false;
		}
		else if (pinkLeft){
			pockets[pocketCount].xLocation = xLeft;
			pockets[pocketCount].yLocation = yMidTop;
			pocketCount++;
			pinkLeft = false;
		}
		else if (pinkRight){
			pockets[pocketCount].xLocation = xRight;
			pockets[pocketCount].yLocation = yMidTop;
			pocketCount++;
			pinkRight = false;
		}

		//Remove pink marker from candidate list
		pinkKeyPoints.erase(pinkKeyPoints.begin() + min, pinkKeyPoints.begin() + min + 1);
	}

	//Use the pink marker furthest to the left
	/*if ((pocketCount == 2 || pocketCount == 3) && !pinkKeyPoints.empty()){
		//Determine which pink side marker is being used.
		//Should be marker closest along line between first two pockets.
		float distance = -1;
		int min = 0;
		cv::Vec2f line = lineEqn(pockets[0].pocketPoints.pt.x, pockets[0].pocketPoints.pt.y, pockets[1].pocketPoints.pt.x, pockets[1].pocketPoints.pt.y);
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = pinkKeyPoints[i].pt.x;
			if ((distance + 1) < epsilon || newDistance < distance){
				distance = newDistance;
				min = i;
			}
		}
		pockets[pocketCount].pocketPoints = pinkKeyPoints[min];
		pockets[pocketCount].xLocation = xLeft;
		pockets[pocketCount].yLocation = yMidTop;
		pinkKeyPoints.erase(pinkKeyPoints.begin() + min, pinkKeyPoints.begin() + min + 1);
		pocketCount++;
	}*/

	//If 2 or 3 pockets are picked up, use any pink side marker
	/*if (pocketCount == 2 || pocketCount == 3){
		//Determine which pink side marker is being used.
		//Should be marker closest along line between first two pockets.
		float distance = -1;
		int min = 0;
		cv::Vec2f line = lineEqn(pockets[0].pocketPoints.pt.x, pockets[0].pocketPoints.pt.y, pockets[1].pocketPoints.pt.x, pockets[1].pocketPoints.pt.y);
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = distPointToLine(pinkKeyPoints[i].pt.x, pinkKeyPoints[i].pt.y, line);
			if ((distance + 1) < epsilon || newDistance < distance){
				distance = newDistance;
				min = i;
			}
		}
		pockets[pocketCount].pocketPoints = pinkKeyPoints[min];
		pockets[pocketCount].xLocation = (pockets[0].xLocation + pockets[1].xLocation) / 2;
		pockets[pocketCount].yLocation = (pockets[0].yLocation + pockets[1].yLocation) / 2;
		pocketCount++;
	}*/

	//If 2 pockets are picked up, use a pink marker not linearly dependent with the pockets.
	//This is accomplished by finding the pink marker furthest from the line.
	/*if (realPocketCount == 2){
		//Determine which pink side marker is being used.
		//Should be marker furthest along line between pockets.
		float distance = 0;
		int max = 0;
		cv::Vec2f line = lineEqn(pockets[0].pocketPoints.pt.x, pockets[0].pocketPoints.pt.y, pockets[1].pocketPoints.pt.x, pockets[1].pocketPoints.pt.y);
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = distPointToLine(pinkKeyPoints[i].pt.x, pinkKeyPoints[i].pt.y, line);
			if ( newDistance > distance){
				distance = newDistance;
				max = i;
			}
			pockets[pocketCount].pocketPoints = pinkKeyPoints[max];
			//Remove pink Keypoint so it doesn't get used as 4th point in the transform.

			if (!pinkKeyPoints.empty()){
				pinkKeyPoints.erase(pinkKeyPoints.begin() + max, pinkKeyPoints.begin() + max + 1);
			}
			pocketCount++;
			//Need to determine coordinates for point in perspective transform
			addNonLinearPointLocation(pockets);
		}
	}*/

	/*//If 3 pockets are picked up, use any pink side marker
	if (pocketCount == 3){
		//Determine which pink side marker is being used.
		//Should be marker closest along line between first two pockets.
		float distance = -1;
		int min = 0;
		cv::Vec2f line = lineEqn(pockets[0].pocketPoints.pt.x, pockets[0].pocketPoints.pt.y, pockets[1].pocketPoints.pt.x, pockets[1].pocketPoints.pt.y);
		for (int i = 0; i < pinkKeyPoints.size(); i++){
			float newDistance = distPointToLine(pinkKeyPoints[i].pt.x, pinkKeyPoints[i].pt.y, line);
			if ((distance + 1) < epsilon || newDistance < distance){
				distance = newDistance;
				min = i;
			}
			pockets[pocketCount].pocketPoints = pinkKeyPoints[min];

			if (!pinkKeyPoints.empty()){
				pinkKeyPoints.erase(pinkKeyPoints.begin() + min);
			}
			//Need to determine coordinates for point in perspective transform
			//First calculate distance from both known pocket points
			float distToPocket0 = distBetweenKeyPoints(pockets[0].pocketPoints, pockets[pocketCount - 1].pocketPoints);
			float distToPocket1 = distBetweenKeyPoints(pockets[1].pocketPoints, pockets[pocketCount - 1].pocketPoints);
			
			//For the case where both pockets are top pockets, pink pocket must be directly below these.
			if (inferPurple && inferOrange){
				pockets[pocketCount].yLocation = yMidTop;
				if (distToPocket0 < distToPocket1)
					pockets[pocketCount].xLocation = xLeft;
				else
					pockets[pocketCount].xLocation = xRight;
			}

			if (!inferOrange){
				if (distToPocket0 < distToPocket1){
					pockets[pocketCount].xLocation = xMid;
					pockets[pocketCount].yLocation = yTop;
				}
				else{
					//May get here in test video on accident
					//Then logic is broken.
					pockets[pocketCount].xLocation = xRight;
					pockets[pocketCount].yLocation = yMidBot;
				}
			}

			if (!inferPurple){
				if (distToPocket0 < distToPocket1){
					pockets[pocketCount].xLocation = xMid;
					pockets[pocketCount].yLocation = yTop;
				}
				else{
					//Should never get here in test video
					pockets[pocketCount].xLocation = xRight;
					pockets[pocketCount].yLocation = yMidBot;
				}
			}

			//Increase pocket count once all locations are set.
			pocketCount++;
		}
	}*/

	/*while (pockets.size() >= 2 && pockets.size() < 4 && !pinkKeyPoints.empty()){
		if (pinkKeyPoints.size() > 0){
			int i = 0;
			float distance = -1;
			
			else{
				//Find equation for line
				for (int j = 0; j < pinkKeyPoints.size(); j++){
					float newDistance = sqrt();
					if (distance == 0 || newDistance < distance){

					}

				}
				pockets[pocketCount] = pinkKeyPoints(i);

				if (!pinkKeyPoints.empty()){
					pinkKeyPoints.erase(pinkKeyPoints.begin() + i);
				}
				pocketCount++;
			}
		}
	}*/

	/*if (pocketCount == 3){
		cv::KeyPoint tempPoint = cv::KeyPoint();
		tempPoint.pt.x = (pockets[0].pocketPoints.pt.x + pockets[1].pocketPoints.pt.x) / 2;
		tempPoint.pt.y = (pockets[0].pocketPoints.pt.y + pockets[1].pocketPoints.pt.y) / 2;
		pockets[pocketCount].pocketPoints = tempPoint;
		pockets[pocketCount].xLocation = (pockets[0].xLocation + pockets[1].xLocation) / 2;
		pockets[pocketCount].yLocation = (pockets[0].yLocation + pockets[1].yLocation) / 2;
		pocketCount++;
	}*/

	return pockets;
}