Example No. 1
    void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
    {
        CV_INSTRUMENT_REGION()

        std::vector<Point2f> corners;

        if (_image.isUMat())
        {
            UMat ugrayImage;
            if( _image.type() != CV_8U )
                cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
            else
                ugrayImage = _image.getUMat();

            goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                 blockSize, useHarrisDetector, k );
        }
        else
        {
            Mat image = _image.getMat(), grayImage = image;
            if( image.type() != CV_8U )
                cvtColor( image, grayImage, COLOR_BGR2GRAY );

            goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                blockSize, useHarrisDetector, k );
        }

        keypoints.resize(corners.size());
        std::vector<Point2f>::const_iterator corner_it = corners.begin();
        std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
        for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
            *keypoint_it = KeyPoint( *corner_it, (float)blockSize );

    }
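For comparison, the detect() above is what OpenCV's public GFTT wrapper runs internally; in OpenCV 3.x/4.x the same corner-to-KeyPoint pipeline is reachable without subclassing. A minimal sketch (the image path and parameter values are placeholders, not taken from this example):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder path
    std::vector<cv::KeyPoint> keypoints;
    // maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k
    cv::Ptr<cv::GFTTDetector> gftt = cv::GFTTDetector::create(1000, 0.01, 1.0, 3, false, 0.04);
    gftt->detect(img, keypoints);   // runs the corner -> KeyPoint loop shown above
    return 0;
}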
Example No. 2
void CV_GoodFeatureToTTest::run_func()
{
    int cn = src_gray.channels();

    CV_Assert( cn == 1 );
    CV_Assert( ( CV_MAT_DEPTH(SrcType) == CV_32FC1 ) || ( CV_MAT_DEPTH(SrcType) == CV_8UC1 ));

    TEST_MESSAGEL ("             maxCorners = ", maxCorners)
    if (useHarrisDetector)
    {
        TEST_MESSAGE ("             useHarrisDetector = true\n");
    }
    else
    {
        TEST_MESSAGE ("             useHarrisDetector = false\n");
    }

    if( CV_MAT_DEPTH(SrcType) == CV_32FC1)
    {
        if (src_gray.depth() != CV_32FC1 ) src_gray.convertTo(src_gray32f, CV_32FC1);
        else   src_gray32f = src_gray.clone();

        TEST_MESSAGE ("goodFeaturesToTrack 32f\n")

        goodFeaturesToTrack( src_gray32f,
               corners,
               maxCorners,
               qualityLevel,
               minDistance,
               Mat(),
               blockSize,
               gradientSize,
               useHarrisDetector,
               k );
    }
    else
    {
        if (src_gray.depth() != CV_8UC1 ) src_gray.convertTo(src_gray8U, CV_8UC1);
        else   src_gray8U = src_gray.clone();

        TEST_MESSAGE ("goodFeaturesToTrack 8U\n")

        goodFeaturesToTrack( src_gray8U,
               corners,
               maxCorners,
               qualityLevel,
               minDistance,
               Mat(),
               blockSize,
               gradientSize,
               useHarrisDetector,
               k );
    }
}
Example No. 3
void GetTrackedPoints(const mat3b & im1, const mat3b & im2, vector<TrackedPoint> & points_out, 
		      int maxCorners, float qualityLevel, float minDistance, int blockSize,
		      int winSize_, int maxLevel, int criteriaN, float criteriaEps) {
#if 1
  const int useHarrisDetector = 0;
  const float k = 0.04f;
  const Size winSize(winSize_, winSize_);
  const TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
					     criteriaN, criteriaEps);
  const double derivLambda = 0;
  const int flags = 0;
  assert(im1.size() == im2.size());
  matb im1gray;
  cvtColor(im1, im1gray, CV_BGR2GRAY);
#ifdef OPENCV_2_1
  Mat mask;
  vector<Point2f> corners1, corners2;
  vector<uchar> status;
  vector<float> err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      mask, blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < (signed)corners1.size(); ++i)
    if (status[i])
      points_out.push_back(TrackedPoint(corners1[i].x, corners1[i].y,
					corners2[i].x, corners2[i].y));
#else
  Mat corners1, corners2, status, err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      noArray(), blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < corners1.size().height; ++i)
    if (status.at<unsigned char>(i,0))
      points_out.push_back(TrackedPoint(corners1.at<Vec2f>(i,0)[0],corners1.at<Vec2f>(i,0)[1],
					corners2.at<Vec2f>(i,0)[0],corners2.at<Vec2f>(i,0)[1]));
#endif
#else
  matb im1_gray, im2_gray;
  cvtColor(im1, im1_gray, CV_BGR2GRAY);
  cvtColor(im2, im2_gray, CV_BGR2GRAY);
  Mat flow_cv(im1.size().height, im1.size().width, CV_32FC2);
  calcOpticalFlowFarneback(im1_gray, im2_gray, flow_cv, 0.5, 5, 11, 10, 5, 1.1, 0);
  
  points_out.clear();
  for (int i = 20; i < im1.size().height-20; i += 20)
    for (int j = 20; j < im1.size().width-20; j += 20) {
      const Vec2f f = flow_cv.at<Vec2f>(i, j);
      points_out.push_back(TrackedPoint(j, i, j+f[0], i+f[1]));
    }
  cout << "n points " << points_out.size() << endl;
#endif
}
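Both branches above pass derivLambda, which later OpenCV releases dropped from calcOpticalFlowPyrLK (from 2.4 on, that slot is minEigThreshold). A sketch of the non-OPENCV_2_1 branch against the newer signature, reusing the function's locals and under that version assumption:

  vector<Point2f> corners1, corners2;
  vector<uchar> status;
  vector<float> err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      noArray(), blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize,
		       maxLevel, criteria, 0 /*flags*/, 1e-4 /*minEigThreshold*/);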
Example No. 4
int InterframeRegister::InitFeatures(Mat fc, vector<Point2f> &arrFeaturePts)
{
	Mat eigImage,tempImage;
	int cornerCount = InterframeRegister::MAX_KLT_POINTS;
	goodFeaturesToTrack(fc, arrFeaturePts, cornerCount, FEATURE_QUALITY, MIN_FEATURE_DIST);
	return arrFeaturePts.size();
}
Example No. 5
int CVShiTomasiCorners::Process(CVPipeline * pipe)
{
	if (pipe->input.channels() != 1)
	{
		errorString = "Requires single-channel input image.";
		return -1;
	}
	try {
		//double qualityLevel = 0.01;
		//double minDistance = 10;
		//int blockSize = 3;
		//bool useHarrisDetector = false;
		//double k = 0.04;
	
		/// Apply corner detection
		goodFeaturesToTrack( pipe->input,
					pipe->corners,
					maxCorners->GetInt(),
					qualityLevel->GetFloat(),
					minimumDistance->GetFloat(),
					cv::Mat(),
					blockSize->GetInt(),
					useHarrisDetector->GetInt(),
					k->GetFloat());
	}
	catch (...)
	{
		errorString = ";_;";
		return CVReturnType::NOTHING;
	}
	returnType = CVReturnType::CV_CORNERS;
	return returnType;
}
Example No. 6
vector<Point2f> ASEF_Algorithm::getInnerCanthus(Mat input_image, Rect leftEyeRect, Rect rightEyeRect) {
    vector<Point2f> canthus(2);
    Mat gray_img;
    Rect canthus_ll_rect = getCanthus_LL_Rect(leftEyeRect);
    Rect canthus_rr_rect = getCanthus_RR_Rect(rightEyeRect);
    Mat  eyeCornerMask = Mat::zeros(input_image.size(), CV_8UC1);
    eyeCornerMask(canthus_ll_rect) = 255;
    eyeCornerMask(canthus_rr_rect) = 255;
    if (input_image.channels() == 3) {
        cvtColor(input_image, gray_img, CV_BGR2GRAY);
    } else gray_img = input_image;
    
    //    equalizeHist(gray_img(leftEyeRect), gray_img(leftEyeRect));
    //    equalizeHist(gray_img(rightEyeRect), gray_img(rightEyeRect));
    
    Mat masked ;
    gray_img.copyTo(masked, eyeCornerMask);
    
    goodFeaturesToTrack(gray_img, canthus, 2 , 0.05, 40,eyeCornerMask,6,false);
    
    imshow("canthus gray",masked);
    
    if (canthus[0].x > canthus[1].x) {
        swap(canthus[0],canthus[1]);
    }
    
    return canthus;
}
Example No. 7
/**
    The current image (index) is searched for (tracked) in all positive and negative examples.
*/
bool NearestNeighbourClassifier::classifyViaTrack(int index, Result* result, int* grid, Mat& f)
{
    // prepare the patch
    Mat patch = f(Rect(grid[index * 4],grid[index * 4 + 1],grid[index * 4 + 2],grid[index * 4 + 3] ));
    Mat normalizedPatch;
    Utility::generateNormalizedPatch(patch,normalizedPatch);
    /// TEST!!! Try goodFeaturesToTrack on the image!
    vector<Point2f> features;
    goodFeaturesToTrack(normalizedPatch,features,500,0.1,10);
    // output > disabled
    //for (size_t i = 0; i < features.size(); ++i)
    //    cout << features.at(i) << endl;

    Rect b1 = Rect(0,0,15,15), b2;
    size_t i = 0;
    // iterate over all positive examples
    for(i = 0; i < positiveExamples.size(); i++)
    {
        // "track" the patch
        if (tracker->track(patch,positiveExamples.at(i),b1, b1,b2)) cout << "Positive tracked" << endl;
        else cout << "Positive NOT tracked" << endl;

    }




    return true;
}
Example No. 8
// ------------------------------------------------------
void FlowFinder::findTrackingPoints( Image& frame )
{				
	// automatic initialization
	frame.cvImg.copyTo(gray);
	goodFeaturesToTrack( gray, curr_points, MAX_COUNT, 0.01, 10, cv::Mat(), 3, 0, 0.04);
	cornerSubPix(gray, curr_points, winSize, cv::Size(-1,-1), termcrit);
	addRemovePt = false;
}
Example No. 9
void GoodFeaturesToTrackDetector::detectImpl(const Mat& image, const Mat& mask,
        vector<KeyPoint>& keypoints) const {
    vector<Point2f> corners;
    goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask,
                        blockSize, useHarrisDetector, k);
    keypoints.resize(corners.size());
    vector<Point2f>::const_iterator corner_it = corners.begin();
    vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
    for (; corner_it != corners.end(); ++corner_it, ++keypoint_it) {
        *keypoint_it = KeyPoint(*corner_it, (float)blockSize);
    }
}
Example No. 10
void OpticalFlow::getOpticalFlowPoints(const Rect &rect, Mat &gray) {
    if (rect.x >= 0 && rect.y >= 0 && rect.x + rect.width < gray.cols && rect.y + rect.height < gray.rows) {
        Mat grayTrimmed(gray, rect);
        goodFeaturesToTrack(grayTrimmed, data.points[1], data.MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
        if (!data.points[1].empty()) {
            if (rect.width > data.subPixWinSize.width * 2 + 5 && rect.height > data.subPixWinSize.height * 2 + 5)
                cornerSubPix(grayTrimmed, data.points[1], data.subPixWinSize, Size(-1, -1), data.termcrit);
            for (Point2f &p : data.points[1]) {
                p.x += rect.x; // project points to image
                p.y += rect.y; // project points to image
            }
        }
    }
}
Example No. 11
void calcImageDisplacement(Mat img1, Mat img2, vector<uchar> *status, vector<float> *err) {
	vector<Point2f> corners1, corners2;
	goodFeaturesToTrack(img1, corners1, N_CORNERS, FEATURE_QUALITY, MIN_DISTANCE);

	int nIterations = 30;
	double epsilon = .01;
	TermCriteria tc (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, nIterations, epsilon);
	Size winSize = Size(3, 3);
	Size zeroZone = Size(-1, -1);
	cornerSubPix(img1, corners1, winSize, zeroZone, tc);

	int maxLevel = 3;
	calcOpticalFlowPyrLK(img1, img2, corners1, corners2,
	                     (*status), (*err), winSize, maxLevel);
}
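As written, corners2 (and with it the measured motion) never leaves calcImageDisplacement; only status and err reach the caller. If the displacement itself is needed, a few lines before the closing brace could average the successfully tracked pairs; a sketch using the function's locals:

	// Sketch: mean displacement of the points LK managed to track.
	Point2f meanShift(0.f, 0.f);
	int nTracked = 0;
	for (size_t i = 0; i < corners1.size(); ++i)
		if ((*status)[i]) { meanShift += corners2[i] - corners1[i]; ++nTracked; }
	if (nTracked > 0) meanShift *= 1.f / nTracked;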
Example No. 12
void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
    Mat grayImage = image;
    if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );

    vector<Point2f> corners;
    goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, mask,
                         blockSize, useHarrisDetector, k );
    keypoints.resize(corners.size());
    vector<Point2f>::const_iterator corner_it = corners.begin();
    vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
    {
        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
    }
}
Example No. 13
	void findGoodCorners2(const Mat &grayFrame, const SoccerPitchData &data, Mat &currToKeyTrans, Mat &keyToTopTrans) {
		Mat topToCurrTrans;
		invert(keyToTopTrans * currToKeyTrans, topToCurrTrans);
		vector<Point2f> imagePitchOuterContour;
		perspectiveTransform(data.pitchOuterPoints, imagePitchOuterContour, topToCurrTrans);

		vector<Point2f> hull;
		convexHull(imagePitchOuterContour, hull);

		Mat mask = Mat::zeros(frameSize, CV_8UC1);
		fillConvexPoly(mask, vector<Point>(hull.begin(), hull.end()), Scalar(1, 0, 0));

		dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(3, 3)));

		Mat bin;
		adaptiveThreshold(grayFrame, bin, 255, ADAPTIVE_THRESH_MEAN_C , THRESH_BINARY, 5, -10);
		
		vector<Point2f> candidateCorners;
		goodFeaturesToTrack(bin, candidateCorners, 100, 0.01, 24, mask);

		cornerSubPix(bin, candidateCorners, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001));

		vector<Point2f> goodPoints;
		for (Point2f corner : candidateCorners) {
			if (goodCornerCheck(corner, bin) && !closeToBoundary(corner))
				goodPoints.push_back(corner);
		}

		if (goodPoints.size() > 0) {
			vector<Point2f> reprojGoodPoints;
			perspectiveTransform(goodPoints, reprojGoodPoints, keyToTopTrans * currToKeyTrans);
			// try to add these new corners into the relocatedCorners
			for (int i = 0; i < reprojGoodPoints.size(); i++) {
				// if does not exists already and coincide with reproj of 28 points
				bool exists = hasSimilarPoint(relocatedPitchPoints, reprojGoodPoints[i], 10) ;
				int minId = findClosestPoint(data.pitchPoints, reprojGoodPoints[i]);
				double minDist = norm(reprojGoodPoints[i] - data.pitchPoints[minId]);
				if ((!exists ) && (minDist < 16) && (minDist < reprojErr[minId])) {
					relocatedCorners.push_back(goodPoints[i]);
					relocatedPitchPoints.push_back(data.pitchPoints[minId]);
					reprojErr[minId] = minDist;
				}
			}
		}

		cout<<relocatedCorners.size()<<" points relocated"<<endl;
	}
Example No. 14
void HeartFeatureTracker::initialize(Rect bboxInit, Mat &colorImage, Mat &depthImage, int lightConditions) {

    bbox = bboxInit;

    convertRectToMats(boundBox, bbox);

    convertRectToMats(patchOfInterest, getForeheadFromBbox(bbox));

    cvtColor(depthImage, prevGray, COLOR_BGR2GRAY);

    Mat roi = prevGray(bbox);

    goodFeaturesToTrack(roi, prevPoints, 500, 0.01, 1, Mat(), 3, 0, 0.04);
    cornerSubPix(roi, prevPoints, Size(10,10), Size(-1,-1), TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));

    prevGray = roi;
}
Example No. 15
void FeatureTrackerKLTCv::addPointsFromRegion(const cv::Mat &img,const cv::Rect& roi)
{
    cv::Mat imgGray;
    cv::cvtColor(img, imgGray, CV_BGR2GRAY);

    double qualityLevel = 0.05;
    double minDistance = 5.0;
    int maxCorners = 20;
    std::vector<cv::Point2f> points;
    goodFeaturesToTrack(imgGray(roi),points,maxCorners,qualityLevel,minDistance);
    //the points are wrt roi axis. Convert to img axis.
    for(cv::Point2f& p : points){
        p.x+=roi.x;
        p.y+=roi.y;
        this->addPointToTrack(p);
    }


}
Example No. 16
vector<Point2f> ASEF_Algorithm::getOutterCanthus(Mat input_image, Rect leftEyeRect, Rect rightEyeRect)
{
    vector<Point2f> canthus(2);
    Mat gray_img;
    
    Rect canthus_lr_rect = getCanthus_LR_Rect(leftEyeRect);
    Rect canthus_rl_rect = getCanthus_RL_Rect(rightEyeRect);
    
    Mat  eyeCornerMask = Mat::zeros(input_image.size(), CV_8UC1);
    eyeCornerMask(canthus_lr_rect) = 255;
    eyeCornerMask(canthus_rl_rect) = 255;
    Mat eyeCornerMasked ;
    cvtColor(input_image, gray_img, CV_BGR2GRAY);
    goodFeaturesToTrack(gray_img, canthus, 2 , 0.05, 40,eyeCornerMask,6,false);
    
    if (canthus[0].x < canthus[1].x)
        iter_swap(canthus.begin(), canthus.begin() + 1);
    
    return canthus;
}
Example No. 17
//--------------------------------------------------------------------------------------
//       Class:  ARC_Pair
//      Method:  ARC_Pair :: convert_to_point
// Description:  Returns the coordinate of a feature in the center of a rect bounded in
// the center by Size s.
//--------------------------------------------------------------------------------------
    cv::Point
ARC_Pair::convert_to_point ( const cv::Rect& r, const cv::Mat& img, const cv::Size& s )
{
    std::vector<cv::Point> lv;
    cv::Rect little;
    cv::Mat gray;
    cv::Mat mask;

    little = r;
    little += 0.5*cv::Point( r.size()-s );
    little -= r.size() - s;

    cvtColor( img, gray, CV_BGR2GRAY );
    mask = cv::Mat::zeros( img.size(), CV_8UC1 );
    rectangle( mask, little, 255, CV_FILLED );

    goodFeaturesToTrack( gray, lv, 1, 0.01, 10, mask, 3, 0, 0.04);

    return ( lv.size()>0 ) ? lv[0] : cv::Point( -1, -1 ) ;
}		// -----  end of method ARC_Pair::convert_to_point  ----- 
Example No. 18
int main (int argc, char** argv){
	// Initialization
	if (argc == 1){
		initialize(inifile);
	} else if (argc > 1){
		initialize(argv[1]);
	}

	// Object detection - Shi-Tomasi
	Mat src = imread(inputPicture.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
	vector<Point2f> corners;

	goodFeaturesToTrack(src, corners, maxCorners, qualityLevel, 
			minDistance, Mat(), blockSize, useHarrisDetector, k);
	cout << "Erkannte Ecken :" << corners.size() << endl;

	// sort the corner points by their distance to the origin
	vector<Point2f> sortCorners = sortVectorPoints(corners);

	
	// export the corner points
	ofstream exportfile(exportFile, ios::trunc);
	if (exportfile.is_open()){
		for (int i = 0; i < sortCorners.size(); i++){
			string line = to_string(sortCorners[i].x) + ";" + to_string(sortCorners[i].y);
			exportfile << line << endl;
		}
		exportfile.close();
	}

	
	// export an image with the detected corners
	Mat copy;
	copy = src.clone();

	for( int i = 0; i < sortCorners.size(); i++ )
		{ circle( copy, sortCorners[i], circle_radius, Scalar(0),
					2, 8, 0 ); }
	imwrite(exportPicFile, copy);
}
Example No. 19
void LucasKanadeOpticalFlow(Mat& previous_gray_frame, Mat& gray_frame, Mat& display_image)
{
	Size img_sz = previous_gray_frame.size();
	int win_size = 10;
	cvtColor(previous_gray_frame, display_image, CV_GRAY2BGR);
	vector<Point2f> previous_features, current_features;
	const int MAX_CORNERS = 500;
	goodFeaturesToTrack(previous_gray_frame, previous_features, MAX_CORNERS, 0.05, 5, noArray(), 3, false, 0.04);
	cornerSubPix(previous_gray_frame, previous_features, Size(win_size, win_size), Size(-1,-1),
                 TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
    vector<uchar> features_found;
	calcOpticalFlowPyrLK(previous_gray_frame, gray_frame, previous_features, current_features, features_found, noArray(),
                         Size(win_size*4+1,win_size*4+1), 5,
                         TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ));
    for( int i = 0; i < (int)previous_features.size(); i++ )
	{
		if( !features_found[i] )
			continue;
        circle(display_image, previous_features[i], 1, Scalar(0,0,255));
		line(display_image, previous_features[i], current_features[i], Scalar(0,255,0));   
	}
}
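A possible call site for LucasKanadeOpticalFlow, assuming the frames come from a cv::VideoCapture (the capture index and window name are illustrative, not part of the example):

	VideoCapture cap(0);                  // illustrative camera index
	Mat frame, gray, prev_gray, display;
	cap >> frame;
	cvtColor(frame, prev_gray, CV_BGR2GRAY);
	for (;;)
	{
		cap >> frame;
		if (frame.empty()) break;
		cvtColor(frame, gray, CV_BGR2GRAY);
		LucasKanadeOpticalFlow(prev_gray, gray, display);
		imshow("optical flow", display);
		if (waitKey(30) == 27) break; // Esc quits
		gray.copyTo(prev_gray);
	}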
Example No. 20
Point2i InputProcessing::getRightEyeCorner(Mat gray, Rect rightEye) {
    Rect rightEyeCorner = rightEye;
    //omit top 1/4 of image
    rightEyeCorner.y += rightEyeCorner.height * .25;
    rightEyeCorner.height /= 2;
    rightEyeCorner.x += .5 * rightEyeCorner.width;
    rightEyeCorner.width *= .5;

    Mat im = Mat(gray, rightEyeCorner);
    vector<Point2i> features;
    //	GaussianBlur(im, im, Size(3, 3), 0, 0);
    goodFeaturesToTrack(im, features, 15, .15, rightEyeCorner.height / 16);


    double minDist = DBL_MAX; int minIndex = -1, i = 0;

    for (Point2i p : features) {
        // ydist = distance from the middle of the inner edge of the rightEye rectangle
        double ydist = (p.y - (rightEye.height / 4));
        double xdist = (p.x - (rightEye.width / 2));
        double dist = xdist * xdist + ydist * ydist * 4;

        if (dist < minDist) {
            minDist = dist;
            minIndex = i;
        }
        i++;
    }

    if (minIndex >= 0) {
        Point2i res = features[minIndex] + Point2i(rightEyeCorner.x, rightEyeCorner.y);
        if (DEBUG_MODE) {
            //circle(drawFrame, Point2i(rightEye.x + rightEye.width, rightEye.y + rightEye.height / 2), 1, Scalar(255, 10, 10));
        }
        return res;
    }

    return Point2i(-1, -1);
}
Example No. 21
Point2i InputProcessing::getLeftEyeCorner(Mat gray, Rect leftEye) {
    Rect leftEyeCorner = leftEye;
    //omit top 1/4 of image
    leftEyeCorner.y += (int) (leftEyeCorner.height * .25);
    leftEyeCorner.height /= 2;

    leftEyeCorner.width *= .5;

    Mat im = Mat(gray, leftEyeCorner);
    vector<Point2i> features;
    //	GaussianBlur(im, im, Size(3, 3), 0, 0);
    goodFeaturesToTrack(im, features, 15, .15, leftEyeCorner.height / 8);

    double minDist = DBL_MAX; int minIndex = -1, i = 0;

    for (Point2i p : features) {
        // ydist = distance from the middle of the inner edge of the leftEye rectangle
        double ydist = (p.y - (leftEye.height / 4));
        double dist = p.x * p.x + ydist * ydist * 4;
        // y difference is less likely for eye corner
        if (dist < minDist) {
            minDist = dist;
            minIndex = i;
        }
        i++;
    }

    if ( minIndex >= 0 ) {
        Point2i res = features[minIndex] + Point2i(leftEyeCorner.x, leftEyeCorner.y);
        if (DEBUG_MODE) {
            //circle(drawFrame, Point2i(leftEyeCorner.x, leftEyeCorner.y + leftEye.height / 4), 1, Scalar(10, 10, 255));
        }
        return res;
    }

    return Point2i(-1,-1);
}
Example No. 22
void MotionDetection(cv::Mat frame1, cv::Mat frame2)
{
    cv::Mat prev, next;
    cvtColor(frame1, prev, CV_BGR2GRAY); 
    cvtColor(frame2, next, CV_BGR2GRAY); 
    goodFeaturesToTrack( prev, 
            corners,
            maxCorners,
            qualityLevel,
            minDistance,
            cv::Mat(),
            blockSize,
            useHarrisDetector,
            k );
    cornerSubPix(prev, 
            corners,
            cvSize( 10, 10 ) ,
            cvSize( -1, -1 ), 
            cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
    std::vector<uchar> features_found;
    features_found.reserve(maxCorners);
    std::vector<float> feature_errors;
    feature_errors.reserve(maxCorners);
    calcOpticalFlowPyrLK(prev, next, corners, corners_b, features_found, 
            feature_errors, cvSize( 10, 10 ), 5, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0);
    IplImage g = next;
    for( int i = 0; i < maxCorners; ++i )
    {
        CvPoint p0 = cvPoint( cvRound( corners[i].x ), cvRound( corners[i].y ) );
        CvPoint p1 = cvPoint( cvRound( corners_b[i].x ), cvRound( corners_b[i].y ) );
        cvLine( &g, p0, p1, CV_RGB(255,0,0), 3, CV_AA );
    }
    cv::Mat rs(&g);
    imshow( "result window", rs );  
    int key = cv::waitKey(5);
}
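The IplImage/cvLine block at the end of MotionDetection only builds against the legacy C API, and the loop reads maxCorners entries even when fewer corners were found. A sketch of the same drawing with the C++ API, bounded by the actual result size:

    // Sketch: draw the flow vectors with the C++ API instead of IplImage/cvLine.
    for (size_t i = 0; i < corners.size() && i < corners_b.size(); ++i)
    {
        if (i < features_found.size() && !features_found[i])
            continue; // skip points LK failed to track
        cv::line(next, corners[i], corners_b[i], cv::Scalar(0, 0, 255), 3, CV_AA);
    }
    cv::imshow("result window", next);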
Example No. 23
int TrackThread::process(Mat &frame,Mat &output)
{
	int re=0;
	cvtColor (frame,gray,CV_BGR2GRAY);
	frame.copyTo (output);
	// too few feature points, so re-detect them
	//if(points[0].size ()<=10)
	{
		goodFeaturesToTrack (gray,// image
			features,// output feature points
			max_count,// maximum number of features
			qlevel,// quality level
			minDist);// minimum allowed distance
		// insert the detected feature points
		//	points[0].insert (points[0].end (),features.begin (),features.end ());
		//	initial.insert (initial.end (),features.begin (),features.end ());

		points[0]=features;
		initial=features;
	}
	// first frame
	if(gray_prev.empty ()){
		gray.copyTo (gray_prev);
	}
	// estimate, from the previous and current gray frames, where the previous features moved
	// default window is 15x15
	calcOpticalFlowPyrLK (
		gray_prev,// previous gray frame
		gray,// current gray frame
		points[0],// feature positions in the previous frame
		points[1],// feature positions in the current frame
		status,// per-feature tracking-success flags
		err);// patch difference between frames; large values flag erratic points that can be dropped

	int k = 0;
	// remove the features that did not move
	for(int i=0;i<points[1].size();i++){
		if(acceptTrackedPoint (i))
		{
			initial[k]=initial[i];
			points[1][k++] = points[1][i];
		}
	}
	points[1].resize (k);
	initial.resize (k);
	// mark the tracked features

	for(int i=0;i<points[1].size ();i++)
	{
		// draw a line from each feature's initial position to its current one
		line(output,initial[i],points[1][i],Scalar(20,150,210));
		// circle the current position
		circle(output,points[1][i],3,Scalar(120,250,10));
	}
	if(points[1].size()!=0)
		trend(output);
	// centripetal velocity (decayed each frame)
	curx*=0.9;
	cury*=0.9;
	lens=min(output.rows,output.cols)/8;
	edge=min(output.rows,output.cols)/5;
	// control zone
	rectangle(output,
		Point(output.cols/2-lens,output.rows/2-lens),
		Point(output.cols/2+lens,output.rows/2+lens),
		Scalar(1,100,200),3);
	// tracking zone
	rectangle(output,
		Point(edge,edge),
		Point(output.cols-edge,output.rows-edge),
		Scalar(10,210,1),3);
	if(first){
		if(dirx==1){
			re=4;
			line(output,
				Point(output.cols/2+lens,output.rows/2-lens),
				Point(output.cols/2+lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		else if(dirx==-1){
			re=3;
			line(output,
				Point(output.cols/2-lens,output.rows/2-lens),
				Point(output.cols/2-lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		if(diry==1){
			re=2;
			line(output,
				Point(output.cols/2-lens,output.rows/2+lens),
				Point(output.cols/2+lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		else if(diry==-1){
			re=1;
			line(output,
				Point(output.cols/2-lens,output.rows/2-lens),
				Point(output.cols/2+lens,output.rows/2-lens),
				Scalar(20,250,110),5);
		}
	}
	// initialize the feature set and gray image for tracking the next frame
	circle(output,Point(output.cols/2+curx,output.rows/2+cury),10,Scalar(2,1,250),3);
	std::swap(points[1],points[0]);
	cv::swap(gray_prev,gray);
	return re;
}
Example No. 24
void ObjectTrackingTK::Run(cv::VideoCapture & capture)
{
    help();

    cv::Mat frame, currentGray, prevGray;
    std::vector<cv::Point> features;
    int maxNum = 500;
    int blockSize = 3;
    cv::namedWindow(GetName());
    bool findFeatures = false;
    bool canTrack = false;
    cv::Size trWin(7, 7);
    int iterNum = 5;

    while (true)
    {
        capture >> frame;
        cv::cvtColor(frame, currentGray, CV_BGR2GRAY);

        if (findFeatures)
        {
            goodFeaturesToTrack(currentGray, features, maxNum, 0.01, 10, cv::Mat(), blockSize, 0, 0.04);
            findFeatures = false;
            canTrack = true;

            prevGray = currentGray.clone();
        }
        else if (canTrack)
        {
            // Tomasi-Kanade algorithm
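            // The inner loops accumulate, over the tracking window, the 2x2
            // structure tensor C = [sum IxIx, sum IxIy; sum IxIy, sum IyIy]
            // and g = -[sum ItIx; sum ItIy], then solve the Lucas-Kanade
            // normal equations C*d = g for the per-feature displacement d.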
            for (int t = 0; t < iterNum; t++)
            {
                for (int n = 0; n < features.size(); n++)
                {
                    cv::Mat C(2, 2, CV_32FC1, cv::Scalar(0)), g(2, 1, CV_32FC1, cv::Scalar(0)), d(2, 1, CV_32FC1, cv::Scalar(0));
                    int xLeft, xRight, yLow, yHigh;
                    float Ix, Iy, It;

                    xLeft = features[n].x - (trWin.width - 1) / 2;
                    xRight = features[n].x + (trWin.width - 1) / 2;
                    yLow = features[n].y - (trWin.height - 1) / 2;
                    yHigh = features[n].y + (trWin.height - 1) / 2;
                    if (xLeft < 0 || yLow < 0 || xRight >= prevGray.cols - 1 || yHigh >= prevGray.rows - 1)
                        continue;

                    for (int i = yLow; i < yHigh; i++)
                    {
                        for (int j = xLeft; j < xRight; j++)
                        {
                            Ix = static_cast<float>(prevGray.at<uchar>(i, j + 1)) - static_cast<float>(prevGray.at<uchar>(i, j));
                            Iy = static_cast<float>(prevGray.at<uchar>(i + 1, j)) - static_cast<float>(prevGray.at<uchar>(i, j));
                            It = static_cast<float>(currentGray.at<uchar>(i, j)) - static_cast<float>(prevGray.at<uchar>(i, j));

                            C.at<float>(0, 0) += Ix * Ix;
                            C.at<float>(0, 1) += Ix * Iy;
                            C.at<float>(1, 0) += Ix * Iy;
                            C.at<float>(1, 1) += Iy * Iy;

                            g.at<float>(0, 0) -= It * Ix;
                            g.at<float>(1, 0) -= It * Iy;
                        }
                    }

                    d = C.inv() * g;

                    features[n].x += static_cast<int>(d.at<float>(0, 0) + 0.5);
                    features[n].y += static_cast<int>(d.at<float>(1, 0) + 0.5);
                }
            }
        }

        for (int n = 0; n < features.size(); n++)
        {
            cv::circle(frame, features[n], 3, cv::Scalar(255, 255, 1), -1);
        }

        prevGray = currentGray.clone();

        cv::imshow(GetName(), frame);
        char c = (char)cv::waitKey(10);
        if (c == 27)
            return;
        switch (c)
        {
        case 'r':
            findFeatures = true;
            break;
        case 'c':
            features.clear();
            canTrack = false;
            break;
        }
    }
}
Example No. 25
KDvoid CornerSubPix ( KDint nIdx )
{
	string	sMsg;
	KDchar	szStr [ 256 ];

	Mat		tSrc;
	Mat		tDst;
	Mat		tGray;
	KDint	nThresh;
	RNG		tRng ( 12345 );

	nThresh = 205;

	// Load source image and convert it to gray
	tSrc = imread ( "/res/image/apple.png" );
	cvtColor ( tSrc, tGray, CV_BGR2GRAY );

	//
	// Apply Shi-Tomasi corner detector
	//
	// Parameters for Shi-Tomasi algorithm
	vector<Point2f>		aCorners;

	KDdouble	dQualityLevel		= 0.01;
	KDdouble	dMinDistance		= 10;
	KDint		nMaxCorners			= 4;
	KDint		nBlockSize			= 3;
	bool		bUseHarrisDetector	= false;
	KDdouble	dK					= 0.04;

	// Copy the source image
	tDst = tSrc.clone ( );

	// Apply corner detection
	goodFeaturesToTrack ( tGray, aCorners, nMaxCorners, dQualityLevel, dMinDistance, Mat ( ), nBlockSize, bUseHarrisDetector, dK ); 

	// Draw corners detected
	kdSprintfKHR ( szStr, "** Number of corners detected: %d\n", aCorners.size ( ) );
	sMsg = szStr;

	KDint		nR = 4;
	for ( KDuint i = 0; i < aCorners.size ( ); i++ )
	{
		circle ( tDst, aCorners [ i ], nR, Scalar ( tRng.uniform ( 0, 255 ), tRng.uniform ( 0, 255 ), tRng.uniform ( 0, 255 ) ), -1, 8, 0 );
	}

	// Set the needed parameters to find the refined corners
	Size		tWinSize  = Size ( 5, 5 );
	Size		tZeroZone = Size ( -1, -1 );

	TermCriteria  tCriteria = TermCriteria ( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );

	// Calculate the refined corner locations
	cornerSubPix ( tGray, aCorners, tWinSize, tZeroZone, tCriteria );

	// Write them down
	for ( KDuint i = 0; i < aCorners.size ( ); i++ )
	{
		kdSprintfKHR ( szStr, " -- Refined Corner [%d] ( %.3f, %.3f )\n", i, aCorners [ i ].x, aCorners [ i ].y );
		sMsg += szStr;
	}	

	g_pController->setFrame ( 1, tSrc );
	g_pController->setFrame ( 2, tDst );
	g_pController->setMessage ( sMsg.c_str ( ) );
}
Example No. 26
string optical_flow::get_final_direction(Mat m1, Mat m2,
		Rect old_hand_boundary, Rect new_hand_boundary) {

	left_count = 0;
	up_count = 0;
	down_count = 0;
	right_count = 0;
	non_count = 0;

	TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);

	cvtColor(m1, old_gray_img, COLOR_BGR2GRAY);//convert to gray
	cvtColor(m2, new_gray_img, COLOR_BGR2GRAY);

	// extract features
	goodFeaturesToTrack(old_gray_img, old_frame_points, max_count, .01,
			min_distance, Mat(), block_size, 0, .04);
	cornerSubPix(old_gray_img, old_frame_points, Size(10, 10), Size(-1, -1),
			termcrit);

	// track features in next frame
	vector<uchar> status;
	vector<float> err;
	calcOpticalFlowPyrLK(old_gray_img, new_gray_img, old_frame_points,
			new_frame_points, status, err, Size(10, 10), 3, termcrit, 0, 0.001);

	for (unsigned int i = 0; i < new_frame_points.size(); i++) {
		Point2f old_point = old_frame_points.at(i);
		Point2f new_point = new_frame_points.at(i);
		if (!(old_hand_boundary.contains(old_point)
				&& new_hand_boundary.contains(new_point)))
			continue;
		float dist = get_distance(old_point, new_point);
		//		cout<<dist<<endl;
		if (dist < threshold) {
			directions.push_back(non_direction);
			non_count++;
		} else {
			float dx = new_point.x - old_point.x;
			float dy = new_point.y - old_point.y;
			if (abs(dx) > abs(dy)) {//horizontal
				if (abs(dx) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dx < 0) {
						directions.push_back(left_direction);
						left_count++;
					} else {
						directions.push_back(right_direction);
						right_count++;
					}
				}
			} else { //vertical
				if (abs(dy) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dy < 0) {
						directions.push_back(up_direction);
						up_count++;
					} else {
						directions.push_back(down_direction);
						down_count++;
					}
				}
			}
		}
	}

	int dirs_counts[] = { up_count, down_count, left_count, right_count,
			non_count };
	int max_elem = *max_element(dirs_counts, dirs_counts + 5);
	//	cout<<up_count << " "<<down_count<<" "<<left_count<<" " <<right_count<<" "<<non_count<<endl;
	final_direction = "";
	if (up_count == max_elem)
		final_direction = "up";
	else if (down_count == max_elem)
		final_direction = "down";
	else if (left_count == max_elem)
		final_direction = "left";
	else if (right_count == max_elem)
		final_direction = "right";
	else
		final_direction = "none";

	return final_direction;
}
Example No. 27
// Lucas-Kanade optical flow
void QOpenCvImageBox::lucasKanadeOF( IplImage* img ) {
    Point2f pt;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);  // ERROR

    bool addRemovePt = false;

    Size winSize(10,10);

    cvtColor(image, gray, CV_BGR2GRAY);

    if( nightMode )
        image = Scalar::all(0);

    if( needToInit )
    {
        // automatic initialization
        goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
        cornerSubPix(gray, points[1], winSize, Size(-1,-1), termcrit);
        addRemovePt = false;
    }
    else if( !points[0].empty() )
    {
        vector<uchar> status;
        vector<float> err;
        if(prevGray.empty())
            gray.copyTo(prevGray);
        calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0);
        size_t i, k;
        for( i = k = 0; i < points[1].size(); i++ )
        {
            if( addRemovePt )
            {
                if( norm(pt - points[1][i]) <= 5 )
                {
                    addRemovePt = false;
                    continue;
                }
            }

            if( !status[i] )
                continue;

            points[1][k++] = points[1][i];
            circle( image, points[1][i], 3, Scalar(0,255,0), -1, 8);
        }
        points[1].resize(k);
    }

    if( addRemovePt && points[1].size() < (size_t)MAX_COUNT )
    {
        vector<Point2f> tmp;
        tmp.push_back(pt);
        cornerSubPix( gray, tmp, winSize, cvSize(-1,-1), termcrit);
        points[1].push_back(tmp[0]);
        addRemovePt = false;
    }

    needToInit = false;
    /*
    imshow("LK Demo", image);

    char c = (char)waitKey(10);
    if( c == 27 )
        break;
    switch( c )
    {
    case 'r':
        needToInit = true;
        break;
    case 'c':
        points[1].clear();
        break;
    case 'n':
        nightMode = !nightMode;
        break;
    default:
        ;
    }
    */

    std::swap(points[1], points[0]);
    swap(prevGray, gray);
}
Example No. 28
/**
 * Sparse optical flow tracking (Lucas-Kanade method)
 */
std::vector<cv::Rect> 
ObjDetTrack::opticalFlowTracking(const cv::Mat& currframe)
{
  cv::Mat graycurrframe;
  cvtColor(currframe, graycurrframe, cv::COLOR_RGB2GRAY);
  
  assert(objTrackWindow.size() > 0);

  //if new objects detected or old object lost or refresh requested, then find features again
  if(objCornerFeat.size() != objTrackWindow.size()){
    objCornerFeat.clear();

    for(uint32_t i=0; i<objTrackWindow.size(); i++){
      std::vector<cv::Point2f> cornerFeat;

      cv::Mat objImage(graycurrframe, objTrackWindow[i]);
      
      cv::Mat maskOval = cv::Mat::zeros(objImage.size(), CV_8UC1);
      cv::RotatedRect myrotrect = cv::RotatedRect(cv::Point2f(maskOval.cols/2, maskOval.rows/2),
         cv::Size2f(maskOval.cols*0.7, maskOval.rows), 0);
      ellipse(maskOval, myrotrect, cv::Scalar(255), -1, 8); //draw a filled ellipse

      /* maxCorners=50, qualityLevel=0.01, minDistance=5 */
      double minDistance = std::max((double)std::sqrt(objTrackWindow[i].area()/100) , 5.0);
      goodFeaturesToTrack(objImage, cornerFeat, 50, 0.01, minDistance, maskOval);
      
      //coordinates of objCornerFeat is absolute to the whole image,
      //while coordinates of cornerFeat is relative to the objTrackWindow
      cv::Point2f originOffset(objTrackWindow[i].x, objTrackWindow[i].y);
      for(uint32_t j=0; j<cornerFeat.size(); j++) 
        cornerFeat[j] += originOffset;
      objCornerFeat.push_back(cornerFeat);
      
      //std::cout<<"=|num corner feat:"<<cornerFeat.size()<<"|="<<std::endl;
    }
    
    return objTrackWindow;
  }

  //loop over corner features for every DT window
  for(uint32_t i=0; i<objTrackWindow.size(); i++){
    std::vector<cv::Point2f> nextPts;
    std::vector<uchar> status;
    std::vector<float> err;
    calcOpticalFlowPyrLK(previousFrame, graycurrframe, objCornerFeat[i], nextPts, status, err);
        
    //calculate stddev and mean
    float numValidPts = 0.0;
    cv::Point2f meanPt(0.0,0.0);
    for(uint32_t j=0; j<nextPts.size(); j++){
      if(status[j] == 1){
        meanPt += nextPts[j];
        numValidPts += 1.0;
      }
    }
    meanPt = meanPt * (1.0/numValidPts);
    
    cv::Point2f stddev(0.0,0.0);
    for(uint32_t j=0; j<nextPts.size(); j++){
      if(status[j] == 1){
        cv::Point2f tmp(nextPts[j] - meanPt);
        stddev += cv::Point2f(tmp.x*tmp.x , tmp.y*tmp.y);
      }
    }
    stddev = stddev * (1.0/numValidPts);
    stddev.x = std::sqrt(stddev.x);
    stddev.y = std::sqrt(stddev.y);
    
    //invalidates all points too far from mean
    cv::Point2f ab = stddev * 2.5;
    for(uint32_t j=0; j<nextPts.size(); j++){
      if(status[j] == 1){
        cv::Point2f tmp(nextPts[j] - meanPt);
        if((tmp.x/ab.x)*(tmp.x/ab.x)+(tmp.y/ab.y)*(tmp.y/ab.y)>1.0){
          status[j] = 0;
        }
      }
    }
    
    //remove all corner features that is not found from the tracking set
    //and all points too far from mean
    //remove/erase idiom, except customized std::remove for non-unary predicates
    //modified version of a suggested implementation of std::remove on cppreference.com
    std::vector<cv::Point2f>::iterator ow = objCornerFeat[i].begin();
    for(uint32_t j=0; j<objCornerFeat[i].size(); j++){
      if(status[j] == 1){ //condition such that jth item is not to be removed
        (*ow) = nextPts[j];
        ow++;
      }
    }
    objCornerFeat[i].erase(ow, objCornerFeat[i].end());
    
    //if number of valid corner features are below minimum amount (10), redo detection
    //std::cout<<"=|num corner feat left:"<<objCornerFeat[i].size()<<"|="<<std::endl;
    if(objCornerFeat[i].size() < C_NUM_MIN_CORNER_FEATURES){
      detOrTrackFlag = 0;
    }
    
    //update window is defined by points farthest from the cluster center
    float sm_x = objCornerFeat[i][0].x;
    float lg_x = objCornerFeat[i][0].x;
    float sm_y = objCornerFeat[i][0].y;
    float lg_y = objCornerFeat[i][0].y;
    for(uint32_t j=0; j<objCornerFeat[i].size(); j++){
      if(objCornerFeat[i][j].x < sm_x)
        sm_x = objCornerFeat[i][j].x;
      else if(objCornerFeat[i][j].x > lg_x)
        lg_x = objCornerFeat[i][j].x;
      if(objCornerFeat[i][j].y < sm_y)
        sm_y = objCornerFeat[i][j].y;
      else if(objCornerFeat[i][j].y > lg_y)
        lg_y = objCornerFeat[i][j].y;
    }
    
    if(sm_x < 0) sm_x = 0;
    if(sm_y < 0) sm_y = 0;
    if(lg_x >= graycurrframe.cols) lg_x = graycurrframe.cols-1;
    if(lg_y >= graycurrframe.rows) lg_y = graycurrframe.rows-1;
    
    //objTrackWindow[i] = cv::Rect(sm_x,sm_y,lg_x-sm_x,lg_y-sm_y);
    //objTrackWindow[i] = cv::Rect(meanPt.x-stddev.x*2.0,meanPt.y-stddev.y*2.0,stddev.x*4.0,stddev.y*4.0);
    cv::Size2f refSize = cv::Size2f(startingWindow[i].width,startingWindow[i].height);
    objTrackWindow[i] = cv::Rect(meanPt.x-refSize.width/2,meanPt.y-refSize.height/2,refSize.width,refSize.height);

    /*
    std::cout<<"DT window update:"<<(int)sm_x<<","<<(int)sm_y<<","
      <<(int)(lg_x-sm_x)<<","<<(int)(lg_y-sm_y)<<std::endl;
    std::cout<<"Approx by mean+stddev:"
      <<(int)(meanPt.x-stddev.x*2.0)<<","
      <<(int)(meanPt.y-stddev.y*2.0)<<","
      <<(int)(stddev.x*4.0)<<","
      <<(int)(stddev.y*4.0)<<std::endl;
    */
  } //end loop over corner features for every DT window

  return objTrackWindow;
}
Example No. 29
//--------------------------------------------------------------
void testApp::createTriangulation(){
    //1 find features on the color image
    featurePoints.clear();
    ofImage img;
    img.setUseTexture(false);
    img.setFromPixels(player.getVideoPlayer().getPixelsRef());
    img.setImageType(OF_IMAGE_GRAYSCALE);
    goodFeaturesToTrack(toCv(img),
                        featurePoints,
                        maxFeatures,
                        featureQuality,
                        minDistance);
    
    cout << "found " << featurePoints.size() << " features" << endl;
    
    //2 triangulate the features
    triangulate.reset();
    for(int i = 0; i < featurePoints.size(); i++){
        triangulate.addPoint(featurePoints[i].x,featurePoints[i].y, 0);   
    }
    triangulate.triangulate();
    
    //3 copy them into a 3d mesh
    triangulatedMesh.clear();
    vector<ofVec3f>& trianglePoints = triangulate.triangleMesh.getVertices();
    vector<ofVec2f>& textureCoords = meshBuilder.getMesh().getTexCoords();
    vector<bool> validVertIndeces;
    for(int i = 0; i < trianglePoints.size(); i++){
        int closestTexCoordIndex  = 0;
        float closestTexCoordDistance = 1000000;
        for(int j = 0; j < textureCoords.size(); j++){
            ofVec2f tri2d(trianglePoints[i].x,trianglePoints[i].y);
            float texCoordDist = tri2d.distanceSquared(textureCoords[j]);
            if(texCoordDist < closestTexCoordDistance){
                closestTexCoordDistance = texCoordDist;
                closestTexCoordIndex = j;
            }
        }
        ofVec3f vert = meshBuilder.getMesh().getVertex(closestTexCoordIndex);
        triangulatedMesh.addVertex(vert);
        triangulatedMesh.addTexCoord(meshBuilder.getMesh().getTexCoord(closestTexCoordIndex));
        validVertIndeces.push_back(vert.z < farClip && vert.z > 10);
    }
    
    //copy indices across
    faceNormals.clear();	
    faceCenters.clear();

	map<ofIndexType, vector<int> > vertexIndexToFaceIndex;
    for(int i = 0 ; i < triangulate.triangleMesh.getNumIndices(); i+=3){
        ofIndexType a,b,c;
        a = triangulate.triangleMesh.getIndex(i);
        if(!validVertIndeces[a]) continue;
        
        b = triangulate.triangleMesh.getIndex(i+1);
        if(!validVertIndeces[b]) continue;
        
        c = triangulate.triangleMesh.getIndex(i+2);
        if(!validVertIndeces[c]) continue;
        
        triangulatedMesh.addIndex(triangulate.triangleMesh.getIndex(i  ));
        triangulatedMesh.addIndex(triangulate.triangleMesh.getIndex(i+1));
        triangulatedMesh.addIndex(triangulate.triangleMesh.getIndex(i+2));

        //keep track of which faces belong to which vertices
    	vertexIndexToFaceIndex[a].push_back(faceNormals.size());
        vertexIndexToFaceIndex[b].push_back(faceNormals.size());
        vertexIndexToFaceIndex[c].push_back(faceNormals.size());
        
        //calculate the face normal
        ofVec3f& va = triangulatedMesh.getVertices()[a];
        ofVec3f& vb = triangulatedMesh.getVertices()[b];
        ofVec3f& vc = triangulatedMesh.getVertices()[c];
        ofVec3f faceNormal = (vb-va).getCrossed(vc-va).normalized();
        faceNormals.push_back( faceNormal );
        faceCenters.push_back( (va + vb + vc) / 3.);

    }
    
    //now go through and average the normals into the vertices
    triangulatedMesh.getNormals().resize(triangulatedMesh.getNumVertices());
    map<ofIndexType, vector<int> >::iterator it;
    for(it = vertexIndexToFaceIndex.begin(); it != vertexIndexToFaceIndex.end(); it++) {
        ofVec3f average(0,0,0);
		vector<int>& faceNormalIndices = it->second;
        for(int i = 0 ; i < faceNormalIndices.size(); i++){
            average += faceNormals[ faceNormalIndices[i] ];
        }
        average.normalize();
        triangulatedMesh.setNormal(it->first, average); 
    }
    
    //Create a lattice structure
    latticeMesh.clear();
    
    //copy the main vertices into the lattice mesh
    for(int i = 0; i < triangulatedMesh.getNumVertices(); i++){
        latticeMesh.addVertex(triangulatedMesh.getVertex(i));
        latticeMesh.addNormal(triangulatedMesh.getNormal(i));
    }
    
    innerPoints.clear();
    backInnerPoints.clear();
    backPoints.clear();

    
    
    
    //for each triangle, find the centroid and create 3 new vertices that move a fixed distance towards the center
    //then stitch them

    for(int i = 0 ; i < triangulatedMesh.getNumIndices(); i+=3){
        
        ofIndexType o1 = triangulatedMesh.getIndex(i);
        ofIndexType o2 = triangulatedMesh.getIndex(i+1);        
        ofIndexType o3 = triangulatedMesh.getIndex(i+2);
        
        ofVec3f& va = triangulatedMesh.getVertices()[o1];
        ofVec3f& vb = triangulatedMesh.getVertices()[o2];
        ofVec3f& vc = triangulatedMesh.getVertices()[o3];
        
        ofVec3f& center = faceCenters[i/3];
        ofVec3f& normal = faceNormals[i/3];
        
        ofVec3f innerA = va + (center - va).normalized() * 2;
        ofVec3f innerB = vb + (center - vb).normalized() * 2;
        ofVec3f innerC = vc + (center - vc).normalized() * 2;
        
        innerPoints.push_back(innerA);
        innerPoints.push_back(innerB);
        innerPoints.push_back(innerC);
        
    
        backPoints.push_back(va - triangulatedMesh.getNormal(o1) * 2);
        backPoints.push_back(vb - triangulatedMesh.getNormal(o2) * 2);
        backPoints.push_back(vc - triangulatedMesh.getNormal(o3) * 2);

        backInnerPoints.push_back(innerA - normal*2);
        backInnerPoints.push_back(innerB - normal*2);
        backInnerPoints.push_back(innerC - normal*2);

        //get the indices of the inner points
        ofIndexType i1 = latticeMesh.getNumVertices();
        ofIndexType i2 = i1+1;
        ofIndexType i3 = i1+2;
        
        //add the inner points to the mesh
        latticeMesh.addVertex(innerA);
        latticeMesh.addVertex(innerB);
        latticeMesh.addVertex(innerC);
        
        latticeMesh.addNormal(normal);
        latticeMesh.addNormal(normal);
        latticeMesh.addNormal(normal);
        
        //stitch the 3 quads around the inner mesh
        latticeMesh.addIndex(o1);latticeMesh.addIndex(o2);latticeMesh.addIndex(i2);
        latticeMesh.addIndex(i2);latticeMesh.addIndex(i1);latticeMesh.addIndex(o1);
        
        latticeMesh.addIndex(o2);latticeMesh.addIndex(o3);latticeMesh.addIndex(i3);
        latticeMesh.addIndex(i3);latticeMesh.addIndex(i2);latticeMesh.addIndex(o2);

        latticeMesh.addIndex(o3);latticeMesh.addIndex(o1);latticeMesh.addIndex(i1);
        latticeMesh.addIndex(i1);latticeMesh.addIndex(i3);latticeMesh.addIndex(o3);
    
        //add back vertices
        ofIndexType bo1 = latticeMesh.getNumVertices();
        ofIndexType bo2 = bo1+1;
        ofIndexType bo3 = bo1+2;
        
        latticeMesh.addVertex(innerA);
        latticeMesh.addVertex(innerB);
        latticeMesh.addVertex(innerC);

    }
    
    
    
}
Example No. 30
std::pair<Mat, Mat>
ImageRegistrator::registerImages(ImageList inputImages, int resizeFactor, int cornersAmount)
{
    Scaller                     scaller;
    MatrixList                  homographies;
    ImageList::const_iterator   selectedImage = inputImages.begin();
    ImageList::iterator         nthImage = inputImages.begin();
    PointVector                 selectedImageCorners(cornersAmount);
    PointVector                 nthImageCorners(cornersAmount);
    std::vector<uchar>          status(cornersAmount);
    std::vector<float>          error(cornersAmount);

    goodFeaturesToTrack(
            *selectedImage,
            selectedImageCorners,
            cornersAmount,
            0.01,
            1);

    cv::cornerSubPix(
            *selectedImage,
            selectedImageCorners,
            Size(5, 5),
            Size(-1, -1),
            TermCriteria(
                TermCriteria::COUNT + TermCriteria::EPS,
                6000,
                0.001)
            );


    for (; nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            calcOpticalFlowPyrLK(
                    *selectedImage,
                    *nthImage,
                    selectedImageCorners,
                    nthImageCorners,
                    status,
                    error);

            PointVector selImgCor = removeBadPoints(selectedImageCorners, status);
            PointVector nthImgCor = removeBadPoints(nthImageCorners, status);

            Mat H = findHomography(
                    selImgCor,
                    nthImgCor,
                    CV_RANSAC,
                    0.1);

            if (cv::norm(Point2f(H.at<double>(0,2), H.at<double>(1,2))) > 2) {
                nthImage->release();
                continue;
            }

            roundMatrixCoefficients(H, resizeFactor);
            homographies.push_back(H);
        }
    }  
    
    inputImages.erase(std::remove_if(inputImages.begin(),
                inputImages.end(),
                ImageRegistrator::ImageRemPred()),
            inputImages.end());
        
    inputImages = scaller.upscaleImages(inputImages, resizeFactor); 
    MatrixList::iterator h = homographies.begin();

    for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            util::printMatrix(*h, 12);
            warpPerspective(
                    nthImage->clone(),
                    *nthImage,
                    *h,
                    nthImage->size(),
                    cv::INTER_NEAREST | cv::WARP_INVERSE_MAP);
            ++h;
        }
    }

    Mat                 output(selectedImage->size(), selectedImage->type());
    std::list<uchar>    pixelValues;
    Mat                 medianWeights(output.size(), output.type());
    
    for (int i = 0; i < selectedImage->rows ; ++i) {
        for (int j = 0; j < selectedImage->cols; ++j) {

            for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
                uchar value = (*nthImage).at<uchar>(i,j);

                if (value != 0)  {
                    pixelValues.push_back(value);
                }
            }

            if ( !pixelValues.empty() ) {
                output.at<uchar>(i,j) = produceMedian(pixelValues);
                medianWeights.at<uchar>(i,j) = 
                    static_cast<uchar>(std::sqrt(pixelValues.size()));
            }

            pixelValues.clear();
        }
    }

    std::cout << "pixel covreage : " << pixelCovreage(output) << std::endl;

    Mat fullMedian(output.size(), output.type());
    cv::medianBlur(output, fullMedian, 1);

    for (int i = 0; i < output.rows ; ++i) {
        for (int j = 0; j < output.cols; ++j) {
            if (output.at<uchar>(i,j) == 0) {
                output.at<uchar>(i,j) = fullMedian.at<uchar>(i,j);
            }
        }
    }
    util::printImage(output, std::string("tada"));

    return std::pair<Mat, Mat>(output, medianWeights);
}
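produceMedian is referenced above but not included in the listing; a hypothetical drop-in consistent with how it is called (assumed, not the project's actual code; needs <vector> and <algorithm>):

// Hypothetical helper: median of the gathered pixel values via nth_element.
uchar produceMedian(std::list<uchar>& pixelValues)
{
    std::vector<uchar> v(pixelValues.begin(), pixelValues.end());
    std::nth_element(v.begin(), v.begin() + v.size() / 2, v.end());
    return v[v.size() / 2];
}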