Пример #1
0
void ImproveContoursPlugin::ProcessImage_static(ImagePlus* img, IplImage* &gray, float alpha, float beta, float gamma, CvSize win, int scheme, bool useBlur, int max_iter, float epsilon)
{
	// Refine every contour stored in `img` with the snake (active contour)
	// algorithm and replace each contour with its converged position.
	//
	// `gray` is a caller-owned scratch image: it is allocated lazily on the
	// first call and reused afterwards (the caller releases it).
	if (!gray)
		gray = cvCreateImage( cvSize(img->orig->width, img->orig->height), IPL_DEPTH_8U, 1 );
	cvCvtColor(img->orig, gray, CV_BGR2GRAY);

	if (useBlur)
	{
		// CV_MEDIAN smoothing cannot run in place, so smooth via a copy.
		IplImage* temp = cvCreateImage( cvSize(gray->width, gray->height), IPL_DEPTH_8U, 1 );
		cvCopyImage(gray, temp);
		cvSmooth(temp, gray, CV_MEDIAN, 3);
		cvReleaseImage(&temp);
	}

	// Validate the image once, outside the loop — these properties do not
	// change per contour.  (The original checked `gray == NULL` *after*
	// dereferencing it in cvGetRawData, and also tested `&alpha == NULL`
	// and `CV_VALUE != CV_VALUE`, which are always false; dead code removed.)
	if (gray == NULL)
	{
		std::cout << "Err1" << std::endl;
		return;
	}
	uchar *data;
	CvSize size;
	int step;
	cvGetRawData( gray, &data, &step, &size );
	if( (size.height <= 0) || (size.width <= 0) )
		std::cout << "Err2" << std::endl;
	if( step < size.width )
		std::cout << "Err3" << std::endl;
	// cvSnakeImage requires an odd, positive neighbourhood window.
	if( (win.height <= 0) || (!(win.height & 1)))
		std::cout << "Err10 " << win.height << std::endl;
	if( (win.width <= 0) || (!(win.width & 1)))
		std::cout << "Err11" << std::endl;

	// Walk backwards so ReplaceContour(i, ...) cannot disturb the indices
	// of contours not yet visited.
	for (int i=(int)img->contourArray.size()-1; i>=0; i--)
	{
		CvSeq *seq = img->contourArray[i];
		int np = seq->total;
		if( np < 3 )
		{
			// A snake needs at least 3 points; skip degenerate contours
			// instead of passing them to cvSnakeImage (the original still
			// ran the snake after printing the error).
			std::cout << "Err5" << std::endl;
			continue;
		}
		CvPoint* ps = (CvPoint*)malloc( np*sizeof(CvPoint) );
		if( ps == NULL )
		{
			std::cout << "Err4" << std::endl;
			continue;
		}
		cvCvtSeqToArray(seq, ps);

		// Stop after max_iter iterations or once movement drops below
		// epsilon; epsilon is scaled by the point count.
		CvTermCriteria term=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, max_iter, epsilon*np);
		cvSnakeImage( gray, ps, np, &alpha, &beta, &gamma, CV_VALUE, win, term, scheme );
		img->ReplaceContour(i, ps, np);
		free(ps); ps=NULL;
	}
}
Пример #2
0
 void SnakeSegmentation::segment(
   const sensor_msgs::Image::ConstPtr& image_msg)
 {
   // Fit an active contour (snake) to the incoming mono image, seeding it
   // with points along the entire image border, then publish a BGR debug
   // image with the converged contour drawn on it.
   vital_checker_->poke();
   boost::mutex::scoped_lock lock(mutex_);
   cv::Mat input = cv_bridge::toCvCopy(
     image_msg, sensor_msgs::image_encodings::MONO8)->image;
   // snake is only supported in legacy format
   IplImage ipl_input = input;
   float alpha = alpha_;
   float beta = beta_;
   float gamma = gamma_;
   CvTermCriteria criteria;
   criteria.type = CV_TERMCRIT_ITER;
   criteria.max_iter = max_iterations_;
   criteria.epsilon = epsilon_;
   int coeffUsage = CV_VALUE;
   CvSize win = cv::Size(window_size_, window_size_);
   int calcGradient = 1;
   // Seed points: `resolution` samples per edge, walking the border
   // counter-clockwise (left edge down, bottom edge right, right edge up,
   // top edge left).  Loop indices are int to match `resolution` — the
   // original compared size_t against int, triggering sign-compare warnings.
   const int resolution = 20;
   std::vector<CvPoint> points;
   points.reserve(4 * resolution);
   for (int i = 0; i < resolution; i++) {
     double r = (double)i/resolution;
     points.push_back(cvPoint(0, r * image_msg->height));
   }
   for (int i = 0; i < resolution; i++) {
     double r = (double)i/resolution;
     points.push_back(cvPoint(image_msg->width * r, image_msg->height));
   }
   for (int i = 0; i < resolution; i++) {
     double r = (double)i/resolution;
     points.push_back(cvPoint(image_msg->width, image_msg->height * (1 - r)));
   }
   for (int i = 0; i < resolution; i++) {
     double r = (double)i/resolution;
     points.push_back(cvPoint(image_msg->width * (1 - r), 0));
   }
   // cvSnakeImage moves the points in place toward image edges.
   cvSnakeImage(&ipl_input, &(points[0]), (int)points.size(), &alpha, &beta,
                &gamma, coeffUsage, win, criteria, calcGradient);
   cv::Mat debug_image;
   cv::cvtColor(input, debug_image, CV_GRAY2BGR);
   for (size_t i = 1; i < points.size(); i++) {
     cv::line(debug_image, points[i - 1], points[i], cv::Scalar(0, 100, 0), 2);
     cv::circle(debug_image, points[i], 2, cv::Scalar(100, 0, 0), 1);
   }
   pub_debug_.publish(cv_bridge::CvImage(
                        image_msg->header,
                        sensor_msgs::image_encodings::BGR8,
                        debug_image).toImageMsg());
 }
Пример #3
0
std::vector<CvPoint> cvSnakeImage_Shadow( const CvMat * image, std::vector<CvPoint>  points,
		std::vector<float> alpha, std::vector<float> beta, 
		std::vector<float> gamma, 
		CvSize win, CvTermCriteria criteria, int calc_gradient ){
	IplImage ipl_stub;
	CV_FUNCNAME("cvSnakeImage_Shadow");
	
	__BEGIN__;

	cvSnakeImage( cvGetImage(image, &ipl_stub), &(points[0]), points.size(), 
			      &((alpha)[0]), &((beta)[0]), &((gamma)[0]), 
				  (alpha.size()>1 && beta.size()>1 && gamma.size()>1 ? CV_ARRAY : CV_VALUE), 
				  win, criteria, calc_gradient );

	__END__;
	return points;
}
Пример #4
0
void KTexture::setTrackData(const char *filepath)
{
	Parameter snake_param = {0.45, 0.35, 0.2};
	IplImage *src = cvLoadImage(filepath, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
	//cvInitFont(&font, CV_FONT_HERSHEY_DUPLEX, 0.7, 0.7);
	CvPoint center;
	center.x = src->width / 2;
	center.y = src->height / 2;
	int length = 50;
	CvPoint *contour = (CvPoint *)cvAlloc(sizeof(CvPoint) * length);
	for (int i = 0; i < length; i++) {
		contour[i].x = (int)(center.x * cos(2 * CV_PI * i / length) + center.x);
		contour[i].y = (int)(center.y * sin(2 * CV_PI * i / length) + center.y);
	}
	cvCvtColor(src, dst, CV_GRAY2RGB);
	for (int i = 0; i < length - 1; i++) {
		cvLine(dst, contour[i], contour[i + 1], CV_RGB(255, 0, 0), 2, 8, 0);
	}
	for (int i = 0; i < 50; i++) {
		cvSnakeImage(src, contour, length, &snake_param.alpha, &snake_param.beta, &snake_param.gamma,
					 CV_VALUE, cvSize(15, 15), cvTermCriteria (CV_TERMCRIT_ITER, 1, 0.0), 1);
		cvCvtColor(src, dst, CV_GRAY2RGB);
		for (int j = 0; j < length - 1; j++) {
			cvLine(dst, contour[j], contour[j + 1], CV_RGB(255, 0, 0), 2);
		}
		cvLine(dst, contour[length - 1], contour[0], CV_RGB(255, 0, 0), 2);
		//cvPutText (dst_img, iter, cvPoint (15, 30), &font, CV_RGB (0, 0, 255));
	}
	for (int i = 0; i < length; i++) {
		fprintf(stderr, "(x, y) = (%d, %d)\n", contour[i].x, contour[i].y);
	}
	//cvNamedWindow ("Snakes", CV_WINDOW_AUTOSIZE);
	//cvShowImage("Snakes", dst);
	//int c = cvWaitKey (0);
}
Пример #5
0
use_cvSnakeImage::use_cvSnakeImage(IplImage *source)
{
    // Demo: fit a snake (active contour) to `source`, animating up to 100
    // refinement steps in a highgui window (ESC aborts early).
    //
    // NOTE(review): this constructor takes OWNERSHIP of `source` — it is
    // released via cvReleaseImage at the end, so the caller's pointer
    // dangles afterwards.  Confirm callers expect this.
    int i, j = 0, c;
    IplImage *src_img, *dst_img;

    CvPoint *contour;
    CvPoint center;
    int length = 60;              // number of points on the snake
    Parameter snake_param = { 0.45, 0.35, 0.2 };  // alpha/beta/gamma for cvSnakeImage
    CvFont font;
    char iter[8];

    src_img = source;

    dst_img = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 3);

    cvInitFont (&font, CV_FONT_HERSHEY_DUPLEX, 0.7, 0.7);
    center.x = src_img->width / 2;
    center.y = src_img->height / 2;
    // Initial contour: an ellipse through the image centre.
    contour = (CvPoint *) cvAlloc (sizeof (CvPoint) * length);
    for (i = 0; i < length; i++) {
      contour[i].x = (int) (center.x * cos (2 * CV_PI * i / length) + center.x);
      contour[i].y = (int) (center.y * sin (2 * CV_PI * i / length) + center.y);
    }

    // Draw the initial contour.
    cvCvtColor (src_img, dst_img, CV_GRAY2RGB);
    for (i = 0; i < length - 1; i++) {
      cvLine (dst_img, contour[i], contour[i + 1], CV_RGB (255, 0, 0), 2, 8, 0);
    }
    cvLine (dst_img, contour[length - 1], contour[0], CV_RGB (255, 0, 0), 2, 8, 0);

    cvNamedWindow ("Snakes", CV_WINDOW_NORMAL);
    cvShowImage ("Snakes", dst_img);

    // Converge the snake, showing each intermediate step.
    while (j < 100)
    {
      // One snake iteration per pass (CV_TERMCRIT_ITER with max_iter = 1).
      cvSnakeImage (src_img, contour, length, &snake_param.alpha, &snake_param.beta, &snake_param.gamma,
                    CV_VALUE, cvSize (15, 15), cvTermCriteria (CV_TERMCRIT_ITER, 1, 0.0), 1);

      // Redraw the updated contour with an iteration counter overlay.
      cvCvtColor (src_img, dst_img, CV_GRAY2RGB);
      for (i = 0; i < length - 1; i++) {
        cvLine (dst_img, contour[i], contour[i + 1], CV_RGB (255, 0, 0), 2);
      }
      cvLine (dst_img, contour[length - 1], contour[0], CV_RGB (255, 0, 0), 2);
      snprintf (iter, 8, "%03d", ++j);
      cvPutText (dst_img, iter, cvPoint (15, 30), &font, CV_RGB (0, 0, 255));

      cvShowImage ("Snakes", dst_img);
      c = cvWaitKey (0);
      if (c == '\x1b')
        break;
    }

    cvDestroyWindow ("Snakes");
    cvFree (&contour);            // fix: the original leaked the contour buffer
    cvReleaseImage (&src_img);    // releases the caller-supplied image (see note above)
    cvReleaseImage (&dst_img);
}
Пример #6
0
int main(int argc,char* argv[])
{
	IplImage * srcimage = NULL;
	if ( argc == 2)
		srcimage = cvLoadImage((char *)argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	else
		srcimage = cvLoadImage("snake_test.jpg", CV_LOAD_IMAGE_GRAYSCALE);  
	
	if(srcimage==NULL){
		std::cout<<"error file name"<<std::endl;
		return -1;
	}

	InitContour.clear();
	cvNamedWindow("srcImage");	
	cvShowImage("srcImage", srcimage);
	cvSetMouseCallback("srcImage", on_mouse,srcimage);

	cvWaitKey(); //等待迭代开始

	float alpha=0.01;
	float beta=0.1;
	float gamma=0.1;

	CvSize size;
	size.width=3;
	size.height=3;
	
	CvTermCriteria criteria;
	criteria.type=CV_TERMCRIT_ITER;
	criteria.max_iter=5;
	criteria.epsilon=0.1;
	int itetime =500 ;
	for(int ite=0;ite<itetime;ite++)
	{
		//对边界点查值,使所有边界点距离为(1-2)
		ReGeneralPtArrs(InitContour,InitContour);
		//将边界点的平均距离改为3;根据我的经验,距离大一点效果要好一些
		removeNearPoints(InitContour,InitContour,3);
		CvPoint*  pts = new  CvPoint[InitContour.size()];
		for (int i=0;i<InitContour.size();i++)
		{
			pts[i] = InitContour[i];
		}
		if(InitContour.size()<5){
			printf("迭代失败!\n");
			break;
		}

		cvSnakeImage( srcimage, pts,InitContour.size(),&alpha,&beta,&gamma,CV_VALUE,size,criteria,1);

		int size = InitContour.size();
		InitContour.clear();
		for (int i=0;i<size;i++)
		{
			if(pts[i].x<0||pts[i].x>=srcimage->width)
				continue;
			if(pts[i].y<0||pts[i].y>=srcimage->height)
				continue;

			InitContour.push_back(pts[i]);

		}

		showContent(srcimage);
		cvWaitKey(20);
		Sleep(200);
		printf("迭代了 %d 次\n",ite);
		delete []pts;	

		char c = cvWaitKey(1);
		if ((c&255)==27)
			break;
	}

    removeNearPoints(InitContour,InitContour,6);
	showContent(srcimage);
	cvWaitKey();

	cvReleaseImage(&srcimage);
	cvReleaseImage(&temp);

	return 0;
}
Пример #7
0
 void GTVideo::snakeTracking()
 {
     if(foregroundMask.isEmpty() || abnormallist.isEmpty())
     {
         qDebug() << "Video source and initial abnormal range must be set before tracking\n";
         return;
     }

     //initialize the groundtruth
     cv::Mat eye = foregroundMask.at(0);
     eye.setTo(cv::Scalar(0)); //cv::Scalar(0,0,0)
     grdtruth.fill(eye);

     for (int iAb=0; iAb<abnormallist.size(); iAb++)
     {
         uint start = abnormallist[iAb].getStart();
         uint end = abnormallist[iAb].getEnd();
         int length = end-start+1;
         const QVector<cv::Point>& boundaryPoints = abnormallist[iAb].getBoundaryPoints();

         // consctruct a new array of type CvPoint because it will be modified for each frame
         const int npts = boundaryPoints.size();
         CvPoint pts_snake[npts];
         for (int i=0; i<npts; i++)
         {
             pts_snake[i] = boundaryPoints[i];
         }

         // set parameters for cvSnakeImage()
         float alpha = 0.5f;
         float beta = 0.5f;
         float gamma = 0.5f;
         int coeff_usage = CV_VALUE;
         CvSize win = cvSize(21,21);
         CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.5);

         // set tracked object as abnormal ROI
         for (uint iFrame=start; iFrame<=end; iFrame++)
         {
             // update boundary using that in previous frame
             cv::Mat grayFrame(foregroundMask[iFrame]);
             //cv::cvtColor(foregroundMask[iFrame], grayFrame, CV_RGB2GRAY);
             IplImage *ipFrame = new IplImage(grayFrame);
             cvSnakeImage(ipFrame, pts_snake, npts, &alpha, &beta, &gamma, coeff_usage, win, criteria, 1);

             cvSaveImage("frame.tif", ipFrame);


             // convert boundary points from CvPoint[] to vector<Point>
             std::vector<cv::Point> stdBoundPoints;
             for (int i=0; i<npts; i++)
             {
                 cv::Point p(pts_snake[i].x, pts_snake[i].y);
                 stdBoundPoints.push_back(p);
             }

             // fill the empty grayFrame using popygon to get roi
             cv::Mat roi(foregroundMask[iFrame]);
             //cv::cvtColor(foregroundMask[iFrame], roi, CV_RGB2GRAY);
             roi.setTo(cv::Scalar(0)); //cv::Scalar(0,0,0)
             //cv::fillPoly(roi, stdBoundPoints, cv::Scalar(0)); //cv::Scalar(255,255,255)
             const cv::Point *pAddBoundary = stdBoundPoints.data();
             const cv::Point **pBoundaryPoints = &pAddBoundary;
             cv::fillPoly(roi, pBoundaryPoints, &npts, 1, cv::Scalar(255));  //cv::Scalar(255,255,255)

             setGroundtruth(roi, iFrame);

             delete ipFrame;


             cv::imwrite("output.tif", roi);
         }
     }
 }