int main( int argc, char** argv )
{
  CommandLineParser parser( argc, argv, keys );

  String tracker_algorithm = parser.get<String>( 0 );
  String video_name = parser.get<String>( 1 );

  if( tracker_algorithm.empty() || video_name.empty() )
  {
    help();
    return -1;
  }

  //open the capture
  VideoCapture cap;
  cap.open( video_name );

  if( !cap.isOpened() )
  {
    help();
    cout << "***Could not initialize capturing...***\n";
    cout << "Current parameter's value: \n";
    parser.printMessage();
    return -1;
  }

  Mat frame;
  paused = true;
  namedWindow( "Tracking API", 1 );
  setMouseCallback( "Tracking API", onMouse, 0 );

  //instantiates the specific Tracker
  Ptr<Tracker> tracker = Tracker::create( tracker_algorithm );
  if( tracker == NULL )
  {
    cout << "***Error in the instantiation of the tracker...***\n";
    return -1;
  }

  //get the first frame
  cap >> frame;
  frame.copyTo( image );
  imshow( "Tracking API", image );

  bool initialized = false;
  for ( ;; )
  {
    if( !paused )
    {
      cap >> frame;
      frame.copyTo( image );

      if( !initialized && selectObject )
      {
        //initializes the tracker
        if( !tracker->init( frame, boundingBox ) )
        {
          cout << "***Could not initialize tracker...***\n";
          return -1;
        }
        initialized = true;
      }
      else if( initialized )
      {
        //updates the tracker
        if( tracker->update( frame, boundingBox ) )
        {
          rectangle( image, boundingBox, Scalar( 255, 0, 0 ), 2, 1 );
        }
      }
      imshow( "Tracking API", image );
    }

    char c = (char) waitKey( 2 );
    if( c == 'q' )
      break;
    if( c == 'p' )
      paused = !paused;

  }

  return 0;
}
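
This sample depends on globals (keys, image, boundingBox, paused, selectObject) and a mouse callback defined elsewhere in the file. A minimal sketch of what they might look like, assuming a simple drag-to-select ROI; the names and the key-string syntax are illustrative and depend on the OpenCV version:

// Hypothetical support code assumed by the sample above.
static const char* keys =
    "{ 1 | | | Tracker algorithm (e.g. MIL, BOOSTING, TLD) }"
    "{ 2 | | | Input video file }";

Mat image;
Rect2d boundingBox;
bool paused = false;
bool selectObject = false;   // true once the user has drawn a box
static bool dragging = false;

static void onMouse( int event, int x, int y, int, void* )
{
  if( event == EVENT_LBUTTONDOWN )
  {
    dragging = true;
    boundingBox = Rect2d( x, y, 0, 0 );       // anchor the selection
  }
  else if( event == EVENT_MOUSEMOVE && dragging )
  {
    boundingBox.width = x - boundingBox.x;    // stretch toward the cursor
    boundingBox.height = y - boundingBox.y;
  }
  else if( event == EVENT_LBUTTONUP && dragging )
  {
    dragging = false;
    selectObject = true;                      // tracker may now be initialized
  }
}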
Example no. 2
int main(int argc, char *argv[])
{
    CommandLineParser parser(argc, argv, keys);
    string datasetRootPath = parser.get<string>(0);
    int datasetID = parser.get<int>(1);

    if (datasetRootPath.empty())
    {
        help();
        return -1;
    }

    Mat frame;
    paused = false;
    namedWindow("GOTURN Tracking", 0);
    setMouseCallback("GOTURN Tracking", onMouse, 0);

    //Create GOTURN tracker
    Ptr<Tracker> tracker = Tracker::create("GOTURN");

    //Load and init full ALOV300++ dataset with a given datasetID; as an alternative you can use loadAnnotatedOnly(..)
    //to load only frames with labeled ground truth (~ every 5th frame)
    Ptr<cv::datasets::TRACK_alov> dataset = TRACK_alov::create();
    dataset->load(datasetRootPath);
    dataset->initDataset(datasetID);

    //Read first frame
    dataset->getNextFrame(frame);
    frame.copyTo(image);
    rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
    imshow("GOTURN Tracking", image);

    bool initialized = false;
    paused = true;
    int frameCounter = 0;

    //Time measurement
    int64 e3 = getTickCount();

    for (;;)
    {
        if (!paused)
        {
            //Time measurement
            int64 e1 = getTickCount();
            if (initialized){
                if (!dataset->getNextFrame(frame))
                    break;
                frame.copyTo(image);
            }

            if (!initialized && selectObjects)
            {
                //Initialize the tracker and add targets
                if (!tracker->init(frame, boundingBox))
                {
                    cout << "Tracker Init Error!!!";
                    return 0;
                }
                rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
                initialized = true;
            }
            else if (initialized)
            {
                //Update all targets
                if (tracker->update(frame, boundingBox))
                {
                    rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
                }
            }
            imshow("GOTURN Tracking", frame);
            frameCounter++;
            //Time measurement
            int64 e2 = getTickCount();
            double t1 = (e2 - e1) / getTickFrequency();
            cout << frameCounter << "\tframe :  " << t1 * 1000.0 << "ms" << endl;
        }

        char c = (char)waitKey(2);
        if (c == 'q')
            break;
        if (c == 'p')
            paused = !paused;
    }

    //Time measurement
    int64 e4 = getTickCount();
    double t2 = (e4 - e3) / getTickFrequency();
    cout << "Average Time for Frame:  " << t2 * 1000.0 / frameCounter << "ms" << endl;
    cout << "Average FPS:  " << 1.0 / t2*frameCounter << endl;


    waitKey(0);

    return 0;
}
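
The getTickCount()/getTickFrequency() timing pattern used above recurs in several of these examples; a small RAII helper avoids re-typing it. A sketch, not part of the original code:

#include <opencv2/core/core.hpp>
#include <cstdint>
#include <iostream>

// Hypothetical helper: prints the wall-clock time spent in a scope, in ms.
struct ScopedTimer
{
    const char* label;
    int64_t start;
    explicit ScopedTimer(const char* l) : label(l), start(cv::getTickCount()) {}
    ~ScopedTimer()
    {
        double ms = (cv::getTickCount() - start) / cv::getTickFrequency() * 1000.0;
        std::cout << label << ": " << ms << " ms" << std::endl;
    }
};

// Usage inside the per-frame loop:
//   { ScopedTimer t("frame"); /* ...process one frame... */ }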
Example no. 3
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        //read from camera or from  file
        if (TheInputVideo=="live") {
	  //Deepak changed the index from 0 to 1
	  //            TheVideoCapturer.open(0);
            TheVideoCapturer.open(1);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);

        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;

            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
	    
	    if (TheMarkers.size() >= 4){
	      try{	    
		cv::Mat t0 = TheMarkers[0].Tvec;
		cv::Mat t1 = TheMarkers[1].Tvec;
		cv::Mat t2 = TheMarkers[2].Tvec;
		cv::Mat t3 = TheMarkers[3].Tvec;

		float x_len = (t0.at<float>(0,0) - t1.at<float>(0,0));
		float y_len = (t0.at<float>(1,0) - t1.at<float>(1,0));
		float z_len = (t0.at<float>(2,0) - t1.at<float>(2,0));
		float distancesq_len = x_len*x_len+y_len*y_len+z_len*z_len;
		float distance_len = sqrt(distancesq_len);

		float x_width = (t1.at<float>(0,0) - t2.at<float>(0,0));
		float y_width = (t1.at<float>(1,0) - t2.at<float>(1,0));
		float z_width = (t1.at<float>(2,0) - t2.at<float>(2,0));
		float distancesq_width = x_width*x_width+y_width*y_width+z_width*z_width;
		float distance_width = sqrt(distancesq_width);

		cout<<"Length:"<<distance_len<<endl;
		cout<<"Width:"<<distance_width<<endl;
		cout<<"Perimeter:"<<(2*distance_len + 2*distance_width)<<endl;
		cout<<"Area:"<<distance_len*distance_width<<endl;
	      }
	      catch (std::exception& e){
		cout<<e.what()<<endl;
	      }
	    }
	    
	   
            
	    

	    
            //print other rectangles that contains no valid markers
            // for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
            //     aruco::Marker m( MDetector.getCandidates()[i],999);
            //     m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            // }




            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }

    } catch (std::exception &ex)

    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
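
The length/width blocks above repeat the same point-to-point arithmetic; a small helper makes it explicit. A sketch, assuming each Tvec is a 3x1 CV_32F matrix, as the .at<float> calls imply:

#include <opencv2/core/core.hpp>
#include <cmath>

// Hypothetical helper: Euclidean distance between two 3x1 float Tvecs.
static float tvecDistance(const cv::Mat& a, const cv::Mat& b)
{
    float dx = a.at<float>(0,0) - b.at<float>(0,0);
    float dy = a.at<float>(1,0) - b.at<float>(1,0);
    float dz = a.at<float>(2,0) - b.at<float>(2,0);
    return std::sqrt(dx*dx + dy*dy + dz*dz);
}

// float distance_len   = tvecDistance(TheMarkers[0].Tvec, TheMarkers[1].Tvec);
// float distance_width = tvecDistance(TheMarkers[1].Tvec, TheMarkers[2].Tvec);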
Example no. 4
vector<string> showCharSelection(Mat image, vector<Rect> charRegions, string state)
{
  int curCharIdx = 0;

  vector<string> humanInputs(charRegions.size());
  for (int i = 0; i < charRegions.size(); i++)
    humanInputs[i] = SPACE;

  RegexRule regex_rule("", "[\\pL\\pN]", "", "");
  
  int16_t waitkey = waitKey(50);
  while (waitkey != ENTER_KEY_ONE && waitkey != ENTER_KEY_TWO && waitkey != ESCAPE_KEY)
  {
    Mat imgCopy(image.size(), image.type());
    image.copyTo(imgCopy);
    cvtColor(imgCopy, imgCopy, CV_GRAY2BGR);

    rectangle(imgCopy, charRegions[curCharIdx], Scalar(0, 255, 0), 1);

    imshow("Character selector", imgCopy);

    if ((char) waitkey == LEFT_ARROW_KEY)
      curCharIdx--;
    else if ((char) waitkey == RIGHT_ARROW_KEY)
      curCharIdx++;
    else if (waitkey == SPACE_KEY)
    {
      humanInputs[curCharIdx] = " ";
      curCharIdx++;
    }
    else if (waitkey > 0 && regex_rule.match(utf8chr(waitkey))) // Verify that it's an actual character
    {
      // Save the character to disk
      humanInputs[curCharIdx] = utf8chr(waitkey);
      curCharIdx++;

      if (curCharIdx >= charRegions.size())
      {
        waitkey = ENTER_KEY_ONE;
        break;
      }
    }

    if (curCharIdx < 0)
      curCharIdx = 0;
    if (curCharIdx >= charRegions.size())
      curCharIdx = charRegions.size() -1;

    waitkey = waitKey(50);
  }

  if (waitkey == ENTER_KEY_ONE || waitkey == ENTER_KEY_TWO)
  {
    // Save all the inputs
    for (int i = 0; i < charRegions.size(); i++)
    {
      if (humanInputs[i] != SPACE)
        cout << "Tagged " << state << " char code: '" << humanInputs[i] << "' at char position: " << i << endl;
    }
  }

  destroyWindow("Character selector");

  return humanInputs;
}
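
showCharSelection() (and the OCR prep utility in a later example) relies on a utf8chr() helper that is not shown in this listing. A plausible definition, encoding a single Unicode code point as a UTF-8 string; a sketch, the project's actual implementation may differ:

#include <string>

// Hypothetical utf8chr(): encode one code point as UTF-8.
static std::string utf8chr(int cp)
{
    std::string out;
    if (cp < 0x80)         { out += (char)cp; }
    else if (cp < 0x800)   { out += (char)(0xC0 | (cp >> 6));
                             out += (char)(0x80 | (cp & 0x3F)); }
    else if (cp < 0x10000) { out += (char)(0xE0 | (cp >> 12));
                             out += (char)(0x80 | ((cp >> 6) & 0x3F));
                             out += (char)(0x80 | (cp & 0x3F)); }
    else                   { out += (char)(0xF0 | (cp >> 18));
                             out += (char)(0x80 | ((cp >> 12) & 0x3F));
                             out += (char)(0x80 | ((cp >> 6) & 0x3F));
                             out += (char)(0x80 | (cp & 0x3F)); }
    return out;
}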
Example no. 5
int main(int argc,char **argv)
{
    try
    {
        if (  readArguments (argc,argv)==false) return 0;
//parse arguments
        TheBoardConfig.readFromFile(TheBoardConfigFile);
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //Open outputvideo
        if ( TheOutVideoFilePath!="")
	  VWriter.open(TheOutVideoFilePath,VideoWriter::fourcc('M','J','P','G'),15,TheInputImage.size());

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }

        //Create gui

        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
	TheBoardDetector.setParams(TheBoardConfig,TheCameraParameters,TheMarkerSize);
	TheBoardDetector.getMarkerDetector().getThresholdParams( ThresParam1,ThresParam2);
	TheBoardDetector.getMarkerDetector().enableErosion(true);//for chessboards
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab())
        {
            TheVideoCapturer.retrieve( TheInputImage);
            TheInputImage.copyTo(TheInputImageCopy);
            index++; //number of images captured
            double tick = (double)getTickCount();//for checking the speed
            //Detection of the board
            float probDetect=TheBoardDetector.detect(TheInputImage);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
            //print marker borders
            for (unsigned int i=0;i<TheBoardDetector.getDetectedMarkers().size();i++)
                TheBoardDetector.getDetectedMarkers()[i].draw(TheInputImageCopy,Scalar(0,0,255),1);

            //print board
             if (TheCameraParameters.isValid()) {
                if ( probDetect>0.2)   {
                    CvDrawingUtils::draw3dAxis( TheInputImageCopy,TheBoardDetector.getDetectedBoard(),TheCameraParameters);
                    //draw3dBoardCube( TheInputImageCopy,TheBoardDetected,TheIntriscCameraMatrix,TheDistorsionCameraParams);
                }
            }
            //DONE! Easy, right?

            //show input with augmented information and  the thresholded image
            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",TheBoardDetector.getMarkerDetector().getThresholdedImage());
            //write to video if required
            if (  TheOutVideoFilePath!="") {
                //create a composite image that also shows the thresholded image
                //first create a small version of the thresholded image
                cv::Mat smallThres;
                cv::resize( TheBoardDetector.getMarkerDetector().getThresholdedImage(),smallThres,cvSize(TheInputImageCopy.cols/3,TheInputImageCopy.rows/3));
                cv::Mat small3C;
                cv::cvtColor(smallThres,small3C,COLOR_GRAY2BGR);
                cv::Mat roi=TheInputImageCopy(cv::Rect(0,0,TheInputImageCopy.cols/3,TheInputImageCopy.rows/3));
                small3C.copyTo(roi);
                VWriter<<TheInputImageCopy;
// 			 cv::imshow("TheInputImageCopy",TheInputImageCopy);

            }

            key=cv::waitKey(waitTime);//wait for key to be pressed
            processKey(key);
        }


    } catch (std::exception &ex)

    {
        cout<<"Exception :"<<ex.what()<<endl;
    }

}
Example no. 6
// Takes a directory full of single char images, and plops them on a big tif files
// Also creates a box file so Tesseract can recognize it
int main( int argc, const char** argv )
{
  string inDir;
  int tile_width;
  int tile_height;

  TCLAP::CmdLine cmd("OpenAlpr OCR Training Prep Utility", ' ', "1.0.0");

  TCLAP::UnlabeledValueArg<std::string>  inputDirArg( "input_dir", "Folder containing individual character images", true, "", "input_dir_path"  );

  
  TCLAP::ValueArg<int> tileWidthArg("","tile_width","Width (in pixels) for each character tile.  Default=50",false, 50 ,"tile_width_px");
  TCLAP::ValueArg<int> tileHeightArg("","tile_height","Height (in pixels) for each character tile.  Default=60",false, 60 ,"tile_height_px");
  
  try
  {
    cmd.add( inputDirArg );
    cmd.add( tileWidthArg );
    cmd.add( tileHeightArg );

    
    if (cmd.parse( argc, argv ) == false)
    {
      // Error occurred while parsing.  Exit now.
      return 1;
    }

    inDir = inputDirArg.getValue();
    tile_width = tileWidthArg.getValue();
    tile_height = tileHeightArg.getValue();
    
  }
  catch (TCLAP::ArgException &e)    // catch any exceptions
  {
    std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl;
    return 1;
  }
  
  

  if (DirectoryExists(inDir.c_str()) == false)
  {
    printf("Input dir does not exist\n");
    return 1;
  }



  const int CHAR_PADDING_HORIZONTAL = 0;
  const int CHAR_PADDING_VERTICAL = 0;

  const int X_OFFSET = 5;
  const int Y_OFFSET = 5;

  const int PAGE_MARGIN_X = 70;
  const int PAGE_MARGIN_Y = 70;
  const int HORIZONTAL_RESOLUTION = 3500;
  const int MAX_VERTICAL_RESOLUTION = 6000; // Maximum vertical size before chopping into additional pages.

  const int TILE_WIDTH = tile_width;
  const int TILE_HEIGHT = tile_height;

  const int FIXED_CHAR_HEIGHT = 40; // RESIZE all characters to this height

  vector<string> files = getFilesInDir(inDir.c_str());

  sort( files.begin(), files.end(), stringCompare );

  for (int i = 0; i < files.size(); i++)
  {
    if (!hasEnding(files[i], ".png") && !hasEnding(files[i], ".jpg"))
    {
      std::cerr << "Non-image file detected in this directory.  This must be removed first" << std::endl;
      return 1;
    }
  }


  int tiles_per_row = ((float) (HORIZONTAL_RESOLUTION - (PAGE_MARGIN_X * 2))) / ((float) TILE_WIDTH) + 1;
  int lines = files.size() / (tiles_per_row);
  int vertical_resolution = ((lines + 1) * TILE_HEIGHT) + (PAGE_MARGIN_Y * 2) ;
  cout << tiles_per_row <<   " : " << vertical_resolution << endl;

  Mat bigTif = Mat::zeros(Size(HORIZONTAL_RESOLUTION, vertical_resolution), CV_8U);
  bitwise_not(bigTif, bigTif);

  stringstream boxFileOut;

  for (int i = 0; i< files.size(); i++)
  {
    int col = i % tiles_per_row;
    int line = i / tiles_per_row;

    int xPos = (col * TILE_WIDTH) + PAGE_MARGIN_X;
    int yPos = (line * TILE_HEIGHT) + PAGE_MARGIN_Y;
    
    if (hasEnding(files[i], ".png") || hasEnding(files[i], ".jpg"))
    {
      string fullpath = inDir + "/" + files[i];

      cout << "Processing file: " << (i + 1) << " of " << files.size() << " (" << files[i] << ")" << endl;

      string::iterator utf_iterator = files[i].begin();
      int cp = utf8::next(utf_iterator, files[i].end());
      string charcode = utf8chr(cp);

      Mat characterImg = imread(fullpath);


      Mat charImgCopy = Mat::zeros(Size(150, 150), characterImg.type());
      bitwise_not(charImgCopy, charImgCopy);

      characterImg.copyTo(charImgCopy(Rect(X_OFFSET, Y_OFFSET, characterImg.cols, characterImg.rows)));
      cvtColor(charImgCopy, charImgCopy, CV_BGR2GRAY);
      bitwise_not(charImgCopy, charImgCopy);

      vector<vector<Point> > contours;

      //imshow("copy", charImgCopy);
      findContours(charImgCopy, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

      float minHeightPercent = 0.35;
      int minHeight = (int) (((float) characterImg.rows) * minHeightPercent);

      vector<Rect> tallEnoughRects;
      for (int c = 0; c < contours.size(); c++)
      {
        Rect tmpRect = boundingRect(contours[c]);
        if (tmpRect.height > minHeight)
          tallEnoughRects.push_back( tmpRect );
      }

      int xMin = 9999999, xMax = 0, yMin = 9999999, yMax = 0;
      // Combine all the "tall enough" rectangles into one super rectangle
      for (int r = 0; r < tallEnoughRects.size(); r++)
      {
        if (tallEnoughRects[r].x < xMin)
          xMin = tallEnoughRects[r].x;
        if (tallEnoughRects[r].y < yMin)
          yMin = tallEnoughRects[r].y;
        if (tallEnoughRects[r].x + tallEnoughRects[r].width > xMax)
          xMax = tallEnoughRects[r].x + tallEnoughRects[r].width;
        if (tallEnoughRects[r].y + tallEnoughRects[r].height > yMax)
          yMax = tallEnoughRects[r].y + tallEnoughRects[r].height;
      }

      Rect tallestRect(xMin, yMin, xMax - xMin, yMax - yMin);


      //cout << tallestRect.x << ":" << tallestRect.y << " -- " << tallestRect.width << ":" << tallestRect.height << endl;

      Rect cropRect(0, tallestRect.y - Y_OFFSET, tallestRect.width, tallestRect.height);

      //cout << "Cropped: " << cropRect.x << ":" << cropRect.y << " -- " << cropRect.width << ":" << cropRect.height << endl;
      Mat cropped(characterImg, cropRect);
      cvtColor(cropped, cropped, CV_BGR2GRAY);

      Rect destinationRect(xPos, yPos, tallestRect.width, tallestRect.height);
      //cout << "1" << endl;

      cropped.copyTo(bigTif(destinationRect));

      int x1 = destinationRect.x - CHAR_PADDING_HORIZONTAL;
      int y1 = (vertical_resolution - destinationRect.y - destinationRect.height) - CHAR_PADDING_VERTICAL;
      int x2 = (destinationRect.x + destinationRect.width) + CHAR_PADDING_HORIZONTAL;
      int y2 = (vertical_resolution - destinationRect.y) + CHAR_PADDING_VERTICAL;
      //0 70 5602 85 5636 0
      boxFileOut << charcode << " " << x1 << " " << y1 << " ";
      boxFileOut << x2 << " " << y2 ;
      boxFileOut << " 0" << endl;

      //rectangle(characterImg, tallestRect, Scalar(0, 255, 0));
      //imshow("characterImg", cropped);

      waitKey(2);
    }
  }

  imwrite("combined.tif", bigTif);
  ofstream boxFile("combined.box", std::ios::out);
  boxFile << boxFileOut.str();
  
}
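
Two details worth noting: Tesseract box files use a bottom-left origin, which is why the y-coordinates above are flipped via vertical_resolution - y, and the hasEnding() helper is not shown in this listing. A plausible definition; a sketch only:

#include <string>

// Hypothetical hasEnding(): true if fullString ends with the given suffix.
static bool hasEnding(const std::string& fullString, const std::string& ending)
{
    if (fullString.length() < ending.length())
        return false;
    return fullString.compare(fullString.length() - ending.length(),
                              ending.length(), ending) == 0;
}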
Example no. 7

	//Called on each new image from the topic
  void rgbCallback(const sensor_msgs::ImageConstPtr& msg_ptr) {
    
	//Convert the image to OpenCV form
    try {
      ptr_rgb = cv_bridge::toCvCopy(msg_ptr, "bgr8");
      ptr_rgb->image.copyTo(*rgb_image);
    } catch (cv_bridge::Exception& e) {
      ROS_ERROR("cv_bridge exception: %s", e.what());
    }
    waitKey(2);

	//Create a test image
     if(first)
      {
	imwrite("test.jpg", *rgb_image);
	//	cvSaveImage("test.jpg" ,cv_image);
	first = false;
      }
    //try {
    //ptr_hue = cv_bridge::toCvCopy(msg_ptr, "8UC1");
    //imshow(ptr_ccs->image);
    //ptr_hue->image.copyTo(*hue_image);
    //} catch (cv_bridge::Exception& e) {
    //	ROS_ERROR("cv_bridge exception: %s", e.what());
    //}

	//Create a HSV image from the RGB image
    cvtColor(*rgb_image, *hsv_image, CV_BGR2HSV);

	//Split the three image channels into separate matrices 
    vector <Mat> planes;
    split(*hsv_image, planes);
    *hue_image = planes[0];
    *sat_image = planes[1];
    *int_image = planes[2];

	//Upper and lower bounds on hue values
    int rangeMin = (mean_color - window_size)%255;
    int rangeMax = (mean_color + window_size)%255;

    //int otherRangeMin = (other_mean_color - window_size)%255;
    //int otherRangeMax = (other_mean_color + window_size)%255;
	
    if(rangeMin > rangeMax){
      int temp = rangeMax;
      rangeMax = rangeMin;
      rangeMin = temp;	//swap so that rangeMin <= rangeMax
    }
/*
    if(otherRangeMin > otherRangeMax){
      int temp = otherRangeMax;
      otherRangeMax = otherRangeMin;
      otherRangeMin = temp;	//swap so that otherRangeMin <= otherRangeMax
    }
*/
	//Create a binary image from the threshold
    inRange(*hue_image, Scalar((double)rangeMin),Scalar((double)rangeMax), *back_img);
    *color_cc_image = Scalar(0);

	//Apply blur
    back_img->copyTo(*hue_image);
    Size ksize = Size(2 * blurKernel + 1,2 * blurKernel + 1);
    GaussianBlur(*back_img, *blurred_image, ksize, -1, -1);

    //attempts at adaptive thresholding
    Mat* sat_image;	  //input image -> saturation space (unused here; shadows the member above)
    //adaptiveThreshold(*blurred_image, *temp_blurred_image, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 3, .5);
    //threshold(*blurred_image, *temp_blurred_image, THRESH_OTSU, 255, THRESH_BINARY); 

    threshold(*blurred_image, *temp_blurred_image, 110, 255, THRESH_BINARY); 
    convertScaleAbs(*temp_blurred_image, *back_img, 1, 0);
    hue_image->copyTo(*copy_image);

    if (display_image_){
      imshow(color_topic, *back_img);
    }
    getConnectedComponents();

    //Find Connected Components (this will populate the contour vector and perform ordering)
/*
    findContours(*back_img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point());
    if(!contours.empty())
      numCC = getThresholdConnectedComponents();
	
    //I'm adding this
    if (contours.empty()){
      inRange(*hue_image, Scalar((double)otherRangeMin),Scalar((double)otherRangeMax), *back_img);
      *color_cc_image = Scalar(0);
      back_img->copyTo(*hue_image);
      Size ksize = Size(2 * blurKernel + 1,2 * blurKernel + 1);
      GaussianBlur(*back_img, *blurred_image, ksize, -1, -1);
      Mat* sat_image;
      threshold(*blurred_image, *temp_blurred_image, 110, 255, THRESH_BINARY); 
      convertScaleAbs(*temp_blurred_image, *back_img, 1, 0);
      hue_image->copyTo(*copy_image);
      findContours(*back_img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point());
      if(!contours.empty())
        numCC = getThresholdConnectedComponents();
    }
*/
	//Draw connected components	
    for (int i = 0; (i < (int)comps.size()) && (comps[i].idx >= 0) && (comps[i].idx < (int)contours.size()); i++) {
      Scalar color( (rand()&255), (rand()&255), (rand()&255) );
      drawContours(*color_cc_image, contours, comps[i].idx, color, 3, 8, hierarchy,0, Point());
      drawContours(*hue_image, contours, 0, comps[i].idx, 3, 8, hierarchy,0, Point());
    }

   getMoments();

    blobTracker->updateKalmanFiltersConnectedComponents();
    if (numCC > 0)
      blobTracker->getFilteredBlobs(true);
    else
      blobTracker->getFilteredBlobs(false);
    //cv::imshow("cam",hue_image);
    //Draw Filtered Blobs
    
    RotatedRect box;
    Point pt;
    //Image_msgs::FeatureMoments moments;
    //moments.num = 0;
	/*
   for(int i=0; i < 1; i++) //replaced MAX_FEATURES with 1
      {
	FeatureBlob ftemp;
	blobTracker->filters_[i].getFiltered(ftemp);
	//cout << "valid? " << ftemp.getValid() << endl;
	if(ftemp.getValid() && display_image_)
	  {
	    //cout << "Should be displaying something!" << endl;
	    Mat firstTemp, secondTemp;
	    ftemp.getValues(firstTemp, secondTemp);
	    pt.x = firstTemp.at<float>(0,0); pt.y = firstTemp.at<float>(1,0);
	    blobTracker->getBoxFromCov(pt, secondTemp, box);

	    if (box.size.width > 0 && box.size.height > 0 && box.size.width < width && box.size.height < height)
	      {
	      	ellipse(*rgb_image, box, CV_RGB(0,0,255), 3, 8);
		circle(*rgb_image, pt, 3, CV_RGB(255, 0, 0), -1, 8);
	      }	    

	  }
      } */
	pt.x = pos_x;
	pt.y = pos_y; 
	circle(*rgb_image, pt, 3, CV_RGB(255, 0, 0), -1, 8);
	

    //image_pub_.publish(moments);
    if (display_image_) {
      imshow("Color Blobs", *rgb_image);
      imshow("Connected Components", *color_cc_image);
    }
    waitKey(2);
  }
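
Even with the swap fixed, the thresholding above loses the intended behavior when the hue window wraps around 0, since hue is circular. A wrap-aware alternative; a sketch, assuming OpenCV's 8-bit hue range of [0,179]:

#include <opencv2/core/core.hpp>
using namespace cv;

// Hypothetical helper: threshold a hue plane with a window that may wrap.
static void hueInRange(const Mat& hue, int rangeMin, int rangeMax, Mat& mask)
{
    if (rangeMin <= rangeMax)
    {
        inRange(hue, Scalar((double)rangeMin), Scalar((double)rangeMax), mask);
    }
    else // window crosses 0: OR together the two halves
    {
        Mat low, high;
        inRange(hue, Scalar(0.0), Scalar((double)rangeMax), low);
        inRange(hue, Scalar((double)rangeMin), Scalar(179.0), high);
        bitwise_or(low, high, mask);
    }
}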
Example no. 8
        void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
                     const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
                     const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
        {
            Mat modelObjectPoints(1, MIN_POINTS_COUNT, CV_32FC3), modelImagePoints(1, MIN_POINTS_COUNT, CV_32FC2);
            for (int i = 0, colIndex = 0; i < (int)pointsMask.size(); i++)
            {
                if (pointsMask[i])
                {
                    Mat colModelImagePoints = modelImagePoints(Rect(colIndex, 0, 1, 1));
                    imagePoints.col(i).copyTo(colModelImagePoints);
                    Mat colModelObjectPoints = modelObjectPoints(Rect(colIndex, 0, 1, 1));
                    objectPoints.col(i).copyTo(colModelObjectPoints);
                    colIndex = colIndex+1;
                }
            }
            
            //filter same 3d points, hang in solvePnP
            double eps = 1e-10;
            int num_same_points = 0;
            for (int i = 0; i < MIN_POINTS_COUNT; i++)
                for (int j = i + 1; j < MIN_POINTS_COUNT; j++)
                {
                    if (norm(modelObjectPoints.at<Vec3f>(0, i) - modelObjectPoints.at<Vec3f>(0, j)) < eps)
                        num_same_points++;
                }
            if (num_same_points > 0)
                return;
            
            Mat localRvec, localTvec;
            rvecInit.copyTo(localRvec);
            tvecInit.copyTo(localTvec);
        
		    solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
				     params.useExtrinsicGuess, params.flags);
		
            
            vector<Point2f> projected_points;
            projected_points.resize(objectPoints.cols);
            projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
            
            Mat rotatedPoints;
            project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
            
            vector<int> localInliers;
            for (int i = 0; i < objectPoints.cols; i++)
            {
                Point2f p(imagePoints.at<Vec2f>(0, i)[0], imagePoints.at<Vec2f>(0, i)[1]);
                if ((norm(p - projected_points[i]) < params.reprojectionError)
                    && (rotatedPoints.at<Vec3f>(0, i)[2] > 0)) //hack
                {
                    localInliers.push_back(i);
                }
            }
            
            if (localInliers.size() > inliers.size())
            {
                resultsMutex.lock();
                
                inliers.clear();
                inliers.resize(localInliers.size());
                memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
                localRvec.copyTo(rvec);
                localTvec.copyTo(tvec);
                
                resultsMutex.unlock();
            }
        }
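
pnpTask() evaluates a single RANSAC hypothesis: the mask selects a minimal point subset, and the best pose seen so far is kept under the mutex. A sketch of how the per-iteration mask might be drawn; an assumed harness, not the library's actual dispatch code:

#include <algorithm>
#include <random>
#include <vector>

// Hypothetical: mark 'minPoints' random correspondences out of 'totalPoints'
// for one RANSAC iteration.
static std::vector<char> randomMask(int totalPoints, int minPoints, std::mt19937& rng)
{
    std::vector<char> mask(totalPoints, 0);
    std::fill(mask.begin(), mask.begin() + minPoints, 1);
    std::shuffle(mask.begin(), mask.end(), rng);
    return mask;
}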
Example no. 9
// match histograms of 'src' to that of 'dst', according to both masks
void Preprocessor::histMatchRGB(cv::Mat& src, const cv::Mat& src_mask, const cv::Mat& dst, const cv::Mat& dst_mask)
{
#ifdef BTM_DEBUG
    namedWindow("original source",CV_WINDOW_AUTOSIZE);
    imshow("original source",src);
    namedWindow("original query",CV_WINDOW_AUTOSIZE);
    imshow("original query",dst);
#endif

    vector<Mat> chns;
    split(src,chns);
    vector<Mat> chns1;
    split(dst,chns1);
    Mat src_hist = Mat::zeros(1,256,CV_64FC1);
    Mat dst_hist = Mat::zeros(1,256,CV_64FC1);
    Mat src_cdf = Mat::zeros(1,256,CV_64FC1);
    Mat dst_cdf = Mat::zeros(1,256,CV_64FC1);
    Mat Mv(1,256,CV_8UC1);
    uchar* M = Mv.ptr<uchar>();

    for(int i=0;i<3;i++) {
        src_hist.setTo(0);
        dst_hist.setTo(0);
        src_cdf.setTo(0);
        dst_cdf.setTo(0);

        double* _src_cdf = src_cdf.ptr<double>();
        double* _dst_cdf = dst_cdf.ptr<double>();
        double* _src_hist = src_hist.ptr<double>();
        double* _dst_hist = dst_hist.ptr<double>();

        do1ChnHist(chns[i],src_mask,_src_hist,_src_cdf);
        do1ChnHist(chns1[i],dst_mask,_dst_hist,_dst_cdf);

        uchar last = 0;


        for(int j=0;j<src_cdf.cols;j++) {
            double F1j = _src_cdf[j];

            for(int k = last; k<dst_cdf.cols; k++) { //int index: a uchar would wrap around and never terminate when cols == 256
                double F2k = _dst_cdf[k];
                if(abs(F2k - F1j) < HISTMATCH_EPSILON || F2k > F1j) {
                    M[j] = (uchar)k;
                    last = (uchar)k;
                    break;
                }
            }
        }

        Mat lut(1,256,CV_8UC1,M);
        LUT(chns[i],lut,chns[i]);
    }

    Mat res;
    merge(chns,res);

#ifdef BTM_DEBUG
    namedWindow("matched",CV_WINDOW_AUTOSIZE);
    imshow("matched",res);

    waitKey(0);
#endif

    res.copyTo(src);
}
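
histMatchRGB() delegates the per-channel work to do1ChnHist(), which is not shown in this listing. A plausible version filling a 256-bin histogram and its normalized CDF for one 8-bit channel under a mask; a sketch, assuming CV_8UC1 inputs and caller-zeroed arrays:

// Hypothetical do1ChnHist(): masked histogram plus normalized CDF.
static void do1ChnHist(const cv::Mat& img, const cv::Mat& mask,
                       double* hist, double* cdf)
{
    double total = 0.0;
    for (int y = 0; y < img.rows; y++)
        for (int x = 0; x < img.cols; x++)
            if (mask.at<uchar>(y, x) != 0)
            {
                hist[img.at<uchar>(y, x)] += 1.0;
                total += 1.0;
            }

    double running = 0.0;
    for (int i = 0; i < 256; i++)   // cumulate and normalize to [0,1]
    {
        running += hist[i];
        cdf[i] = (total > 0.0) ? running / total : 0.0;
    }
}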
Example no. 10

//This colors the segmentations
void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
{    
    
    CV_Assert( !img.empty() );
    RNG rng = theRNG();
    Mat src_gray, canny_output, mask;
    cvtColor( img, src_gray, CV_BGR2GRAY );
    
    Canny( src_gray, canny_output, thresh, 2*thresh, 3 );
    
    
    /*//If other line finders uncomment the switch below
    Mat dst;
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    int kernel_size = 3; 
    	    Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT );
    	    convertScaleAbs( dst, canny_output );
    /*
    
    switch (lineFinder)
    {
        case '0': 
            Canny( src_gray, canny_output, thresh, 2*thresh, 3 );
            break;
    
    // Other Line finding For mask ... Not useful but looks cool
      // Laplacian
        case '1':
            Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT );
    	    convertScaleAbs( dst, canny_output );
    	    break;
    	case '2':   
    	    //Sobel
    	    /// Generate grad_x and grad_y
    	    Mat grad_x, grad_y;
    	    Mat abs_grad_x, abs_grad_y;
    	    
	    /// Gradient X
	    //Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
	    Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );   
	    convertScaleAbs( grad_x, abs_grad_x );

	    /// Gradient Y  
	    //Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
	    Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );   
	    convertScaleAbs( grad_y, abs_grad_y );

	    /// Total Gradient (approximate)
	    addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, canny_output );
	    break;
    }
    */
    
    copyMakeBorder( canny_output, mask, 1, 1, 1, 1, BORDER_REPLICATE, 1 );
    int numregions = 0;
    vector<Rect> regions;
    vector<Mat> Masks; 
    Rect region;
    //Mat temp2;
    //original.copyTo(temp2);
    for( int y = 0; y < img.rows; y++ )
    {
        for( int x = 0; x < img.cols; x++ )
        {
            if( mask.at<uchar>(y+1, x+1) == 0 )
            {
                Scalar newVal( rng(256), rng(256), rng(256) );
                floodFill( img, mask, Point(x,y), newVal, &region, colorDiff, colorDiff,  4 );  //FLOODFILL_MASK_ONLY +
                if ((region.height * region.width > 250))
                {
                if(rects)  rectangle( img, Point(region.x,region.y), Point(region.x + region.width, region.y + region.height), Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) ), 2, 8, 0 );
                regions.push_back(region);
                Masks.push_back(mask);                
                numregions++;
                }
                
            }
        }
    }
    //cout << "Number of regions: " << numregions << "\tSize of Region Vector: " << regions.size() << endl;
    //imshow( "meanshift", img );
    //imshow( "Regions ROIs", temp2);
    
//    createTrackbar( " Canny thresh:", "Mask", &thresh, max_thresh, meanShiftSegmentation );
    if(display)
    {
        //mask = Scalar::all(0);
        //imshow("mask", mask);
        
        Mat temp2;
        original.copyTo( temp2, canny_output);
        //imshow("Mask", temp2);
        //imshow("Mask After", mask);
        vector<Rect>::iterator it;
        string filename;
        vector<Mat>::iterator itt;
        itt = Masks.begin();
    	
    	
    	it = regions.begin();
    	//just to show first ROI
    	Mat temp;
    	imshow("ROI", img( *it ));
	original.copyTo(temp);
	cout << "Region[" << numregions << "] Values (x,y,row,cols): " << it->x << "\t\t" << it->y << "\t\t" << it->height << "\t\t" << it->width << endl;
	rectangle( temp, Point(it->x,it->y), Point(it->x + it->width, it->y + it->height), Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) ), 2, 8, 0 );
	imshow( "meanshift", temp );
	moveWindow("meanshift", 40,40);
	moveWindow("ROI",700,40);
    	
    	for ( ; it+1 < regions.end() ; )
    	{
        
	switch ( (char)waitKey(10) )
	    {
	    case 27: 
		it = regions.end();
		running = false;
		break;
	    case 'q':  case 'Q':
	       	it = regions.end();
	       	display = false;
	        cvDestroyWindow("meanshift");
	        cvDestroyWindow("ROI");
		break;
	    case 'i': case 'I':
	        it++;
		itt++;
		imshow("ROI", img( *it ));
		original.copyTo(temp);
		cout << "Region[" << numregions << "] Values (x,y,row,cols): " << it->x << "\t\t" << it->y << "\t\t" << it->height << "\t\t" << it->width << endl;
		rectangle( temp, Point(it->x,it->y), Point(it->x + it->width, it->y + it->height), Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) ), 2, 8, 0 );
		imshow( "meanshift", temp );
		//imshow( "Masks", *itt );
		break;
	    case 's': case 'S':
	        cout << "Name of New file: " << endl;
	        cin >> filename;
	        filename = "./data/imageDatabase/" + filename + ".png"; 
		if (imwrite( filename, original(*it)) )
		    cout << "File: \"" << filename << "\" is saved!" << endl;
		else cout << "File Save Error" << endl;
		break;
	    case 'o': case 'O':
	        string name;
	        ObjRec.matchObsvToDB( original(*it), name );
	        break;
	    }
	} 
    }
}
Example no. 11

int main(int argc, char *argv[])
{
	#ifdef WIN32
	_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF ); // dump leaks at return
	//_CrtSetBreakAlloc(287);
	#endif
	
	string verboseLevelConsole;
	string verboseLevelImages;
	bool useFileList = false;
	bool useImgs = false;
	bool useDirectory = false;
	bool useGroundtruth = false;
	vector<path> inputPaths;
	path inputFilelist;
	path inputDirectory;
	vector<path> inputFilenames;
	path configFilename;
	shared_ptr<ImageSource> imageSource;
	path outputPicsDir; // TODO: ImageLogger vs ImageSinks? (see AdaptiveTracking.cpp)
	path groundtruthDir; // TODO: Make more dynamic wrt landmark format. a) What about the loading-flags (1_Per_Folder etc) we have? b) Expose those flags to cmdline? c) Make a LmSourceLoader and he knows about a LM_TYPE (each corresponds to a Parser/Loader class?)
	// TODO Also, sometimes we might have the face-box annotated but not LMs, sometimes only LMs and no Facebox.
	string groundtruthType;

	try {
		po::options_description desc("Allowed options");
		desc.add_options()
			("help,h",
				"produce help message")
			("verbose,v", po::value<string>(&verboseLevelConsole)->implicit_value("DEBUG")->default_value("INFO","show messages with INFO loglevel or below."),
				  "specify the verbosity of the console output: PANIC, ERROR, WARN, INFO, DEBUG or TRACE")
			("verbose-images,w", po::value<string>(&verboseLevelImages)->implicit_value("INTERMEDIATE")->default_value("FINAL","write images with FINAL loglevel or below."),
				  "specify the verbosity of the image output: FINAL, INTERMEDIATE, INFO, DEBUG or TRACE")
			("config,c", po::value<path>(&configFilename)->required(), 
				"path to a config (.cfg) file")
			("input,i", po::value<vector<path>>(&inputPaths)->required(), 
				"input from one or more files, a directory, or a  .lst/.txt-file containing a list of images")
			("groundtruth,g", po::value<path>(&groundtruthDir), 
				"load ground truth landmarks from the given folder along with the images and output statistics of the detection results")
			("groundtruth-type,t", po::value<string>(&groundtruthType), 
				"specify the type of landmarks to load: lst, ibug")
			("output-dir,o", po::value<path>(&outputPicsDir)->default_value("."),
				"output directory for the result images")
		;

		po::positional_options_description p;
		p.add("input", -1);
		
		po::variables_map vm;
		po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
		po::notify(vm);

		if (vm.count("help")) {
			cout << "Usage: ffpDetectApp [options]\n";
			cout << desc;
			return EXIT_SUCCESS;
		}
	
		if (vm.count("groundtruth")) {
			useGroundtruth = true;
			if (!vm.count("groundtruth-type")) {
				cout << "You have specified to use ground truth. Please also specify the type of the landmarks to load via --groundtruth-type or -t." << endl;
				return EXIT_SUCCESS;
			}
		}

	} catch(std::exception& e) {
		cout << e.what() << endl;
		return EXIT_FAILURE;
	}

	loglevel logLevel;
	if(boost::iequals(verboseLevelConsole, "PANIC")) logLevel = loglevel::PANIC;
	else if(boost::iequals(verboseLevelConsole, "ERROR")) logLevel = loglevel::ERROR;
	else if(boost::iequals(verboseLevelConsole, "WARN")) logLevel = loglevel::WARN;
	else if(boost::iequals(verboseLevelConsole, "INFO")) logLevel = loglevel::INFO;
	else if(boost::iequals(verboseLevelConsole, "DEBUG")) logLevel = loglevel::DEBUG;
	else if(boost::iequals(verboseLevelConsole, "TRACE")) logLevel = loglevel::TRACE;
	else {
		cout << "Error: Invalid loglevel." << endl;
		return EXIT_FAILURE;
	}
	imagelogging::loglevel imageLogLevel;
	if(boost::iequals(verboseLevelImages, "FINAL")) imageLogLevel = imagelogging::loglevel::FINAL;
	else if(boost::iequals(verboseLevelImages, "INTERMEDIATE")) imageLogLevel = imagelogging::loglevel::INTERMEDIATE;
	else if(boost::iequals(verboseLevelImages, "INFO")) imageLogLevel = imagelogging::loglevel::INFO;
	else if(boost::iequals(verboseLevelImages, "DEBUG")) imageLogLevel = imagelogging::loglevel::DEBUG;
	else if(boost::iequals(verboseLevelImages, "TRACE")) imageLogLevel = imagelogging::loglevel::TRACE;
	else {
		cout << "Error: Invalid image loglevel." << endl;
		return EXIT_FAILURE;
	}
	
	Loggers->getLogger("classification").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Loggers->getLogger("imageio").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Loggers->getLogger("imageprocessing").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Loggers->getLogger("detection").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Loggers->getLogger("shapemodels").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Loggers->getLogger("ffpDetectApp").addAppender(make_shared<logging::ConsoleAppender>(logLevel));
	Logger appLogger = Loggers->getLogger("ffpDetectApp");

	appLogger.debug("Verbose level for console output: " + logging::loglevelToString(logLevel));
	appLogger.debug("Verbose level for image output: " + imagelogging::loglevelToString(imageLogLevel));
	appLogger.debug("Using config: " + configFilename.string());
	appLogger.debug("Using output directory: " + outputPicsDir.string());
	if(outputPicsDir == ".") {
		appLogger.info("Writing output images into current directory.");
	}

	ImageLoggers->getLogger("detection").addAppender(make_shared<imagelogging::ImageFileWriter>(imageLogLevel, outputPicsDir));
	ImageLoggers->getLogger("app").addAppender(make_shared<imagelogging::ImageFileWriter>(imageLogLevel, outputPicsDir / "final"));

	if (inputPaths.size() > 1) {
		// We assume the user has given several, valid images
		useImgs = true;
		inputFilenames = inputPaths;
	} else if (inputPaths.size() == 1) {
		// We assume the user has given either an image, directory, or a .lst-file
		if (inputPaths[0].extension().string() == ".lst" || inputPaths[0].extension().string() == ".txt") { // check for .lst or .txt first
			useFileList = true;
			inputFilelist = inputPaths.front();
		} else if (boost::filesystem::is_directory(inputPaths[0])) { // check if it's a directory
			useDirectory = true;
			inputDirectory = inputPaths.front();
		} else { // it must be an image
			useImgs = true;
			inputFilenames = inputPaths;
		}
	} else {
		appLogger.error("Please either specify one or several files, a directory, or a .lst-file containing a list of images to run the program!");
		return EXIT_FAILURE;
	}

	if (useFileList==true) {
		appLogger.info("Using file-list as input: " + inputFilelist.string());
		shared_ptr<ImageSource> fileListImgSrc; // TODO VS2013 change to unique_ptr, rest below also
		try {
			fileListImgSrc = make_shared<FileListImageSource>(inputFilelist.string(), "C:\\Users\\Patrik\\Documents\\GitHub\\data\\fddb\\originalPics\\", ".jpg");
		} catch(const std::runtime_error& e) {
			appLogger.error(e.what());
			return EXIT_FAILURE;
		}
		imageSource = fileListImgSrc;
	}
	if (useImgs==true) {
		//imageSource = make_shared<FileImageSource>(inputFilenames);
		//imageSource = make_shared<RepeatingFileImageSource>("C:\\Users\\Patrik\\GitHub\\data\\firstrun\\ws_8.png");
		appLogger.info("Using input images: ");
		vector<string> inputFilenamesStrings;	// Hack until we use vector<path> (?)
		for (const auto& fn : inputFilenames) {
			appLogger.info(fn.string());
			inputFilenamesStrings.push_back(fn.string());
		}
		shared_ptr<ImageSource> fileImgSrc;
		try {
			fileImgSrc = make_shared<FileImageSource>(inputFilenamesStrings);
		} catch(const std::runtime_error& e) {
			appLogger.error(e.what());
			return EXIT_FAILURE;
		}
		imageSource = fileImgSrc;
	}
	if (useDirectory==true) {
		appLogger.info("Using input images from directory: " + inputDirectory.string());
		try {
			imageSource = make_shared<DirectoryImageSource>(inputDirectory.string());
		} catch(const std::runtime_error& e) {
			appLogger.error(e.what());
			return EXIT_FAILURE;
		}
	}

	// Load the ground truth
	// Either a) use if/else for imageSource or labeledImageSource, or b) use an EmptyLandmarkSoure
	shared_ptr<LabeledImageSource> labeledImageSource;
	shared_ptr<NamedLandmarkSource> landmarkSource;
	if (useGroundtruth) {
		vector<path> groundtruthDirs; groundtruthDirs.push_back(groundtruthDir); // Todo: Make cmdline use a vector<path>
		shared_ptr<LandmarkFormatParser> landmarkFormatParser;
		if(boost::iequals(groundtruthType, "lst")) {
			landmarkFormatParser = make_shared<LstLandmarkFormatParser>();
			landmarkSource = make_shared<DefaultNamedLandmarkSource>(LandmarkFileGatherer::gather(imageSource, string(), GatherMethod::SEPARATE_FILES, groundtruthDirs), landmarkFormatParser);
		} else if(boost::iequals(groundtruthType, "ibug")) {
			landmarkFormatParser = make_shared<IbugLandmarkFormatParser>();
			landmarkSource = make_shared<DefaultNamedLandmarkSource>(LandmarkFileGatherer::gather(imageSource, ".pts", GatherMethod::ONE_FILE_PER_IMAGE_SAME_DIR, groundtruthDirs), landmarkFormatParser);
		} else {
			cout << "Error: Invalid ground truth type." << endl;
			return EXIT_FAILURE;
		}
	} else {
		landmarkSource = make_shared<EmptyLandmarkSource>();
	}
	labeledImageSource = make_shared<NamedLabeledImageSource>(imageSource, landmarkSource);

	ptree pt;
	try {
		read_info(configFilename.string(), pt);
	} catch(const boost::property_tree::ptree_error& error) {
		appLogger.error(error.what());
		return EXIT_FAILURE;
	}

	string morphableModelFile;
	string morphableModelVertexMappingFile;
	shapemodels::MorphableModel mm;
	
	try {
		ptree ptFeaturePointValidation = pt.get_child("featurePointValidation");
		morphableModelFile = ptFeaturePointValidation.get<string>("morphableModel");
		morphableModelVertexMappingFile = ptFeaturePointValidation.get<string>("morphableModelVertexMapping");
		mm = shapemodels::MorphableModel::loadScmModel("C:\\Users\\Patrik\\Documents\\GitHub\\bsl_model_first\\SurreyLowResGuosheng\\NON3448\\ShpVtxModelBin_NON3448.scm", "C:\\Users\\Patrik\\Documents\\GitHub\\featurePoints_SurreyScm.txt");
	} catch (const boost::property_tree::ptree_error& error) {
		appLogger.error(error.what());
		return EXIT_FAILURE;
	}
	
	FddbLandmarkSink landmarkSink("annotatedList.txt");
	landmarkSink.open(outputPicsDir.string() + "/final/" + "detectedFaces.txt");
	

	// ---

	boost::interprocess::managed_shared_memory managed_shm(boost::interprocess::open_only, "CameraInputMem");
	//catch (boost::interprocess::interprocess_exception& e)

	typedef boost::interprocess::allocator<uchar, boost::interprocess::managed_shared_memory::segment_manager> CharAllocator;
	typedef boost::interprocess::vector<uchar, CharAllocator> IpcVec;
	const CharAllocator alloc_inst(managed_shm.get_segment_manager());

	typedef boost::interprocess::allocator<float, boost::interprocess::managed_shared_memory::segment_manager> FloatAllocator;
	typedef boost::interprocess::vector<float, FloatAllocator> FloatVec;
	vector<float> landmarksX;
	vector<float> landmarksY;

	shapemodels::CameraEstimation cameraEstimation(mm);
	vector<imageio::ModelLandmark> landmarks;

	cv::namedWindow("win");
	while (true) {
		std::pair<IpcVec*, std::size_t> s = managed_shm.find<IpcVec>("EncodedImage"); // check for s.second != 1?
		vector<uchar> vimg;
		for (const auto& e : *s.first) {
			vimg.push_back(e);
		}	
		cv::Mat img2 = cv::imdecode(vimg, -1); // opt.: 3rd: a *Mat for no reallocation every frame
		if (img2.rows <= 0 || img2.cols <= 0) {
			continue;
		}
		std::pair<FloatVec*, std::size_t> sX = managed_shm.find<FloatVec>("LandmarksX"); // check for s.second != 1?
		std::pair<FloatVec*, std::size_t> sY = managed_shm.find<FloatVec>("LandmarksY"); // check for s.second != 1?
		landmarksX.clear();
		landmarksY.clear();
		if (sX.second == 1 && sY.second == 1) {
			for (const auto& lm : *sX.first) {
				landmarksX.push_back(lm);
			}
			for (const auto& lm : *sY.first) {
				landmarksY.push_back(lm);
			}
		
			for (int i = 0; i < landmarksX.size(); ++i) {
				cv::circle(img2, cv::Point((int)landmarksX[i], (int)landmarksY[i]), 1, cv::Scalar(0,255,0), -1);
			}
			landmarks.clear();
			landmarks.emplace_back(imageio::ModelLandmark("right.eye.corner_outer", landmarksX[19], landmarksY[19]));
			landmarks.emplace_back(imageio::ModelLandmark("right.eye.corner_inner", landmarksX[22], landmarksY[22]));
			landmarks.emplace_back(imageio::ModelLandmark("left.eye.corner_outer", landmarksX[28], landmarksY[28]));
			landmarks.emplace_back(imageio::ModelLandmark("left.eye.corner_inner", landmarksX[25], landmarksY[25]));
			landmarks.emplace_back(imageio::ModelLandmark("center.nose.tip", landmarksX[13], landmarksY[13]));
			landmarks.emplace_back(imageio::ModelLandmark("right.lips.corner", landmarksX[31], landmarksY[31]));
			landmarks.emplace_back(imageio::ModelLandmark("left.lips.corner", landmarksX[37], landmarksY[37]));
			
			int max_d = std::max(img2.rows, img2.cols); // should be the focal length? (don't forget the aspect ratio!). TODO Read in Hartley-Zisserman what this is
			//int max_d = 700;
			Mat camMatrix = (cv::Mat_<double>(3,3) << max_d, 0,		img2.cols/2.0,
				0,	 max_d, img2.rows/2.0,
				0,	 0,		1.0);

			pair<Mat, Mat> rotTransRodr = cameraEstimation.estimate(landmarks, camMatrix);
			Mat rvec = rotTransRodr.first;
			Mat tvec = rotTransRodr.second;

			Mat rotation_matrix(3, 3, CV_64FC1);
			cv::Rodrigues(rvec, rotation_matrix);
			rotation_matrix.convertTo(rotation_matrix, CV_32FC1);
			Mat translation_vector = tvec;
			translation_vector.convertTo(translation_vector, CV_32FC1);

			camMatrix.convertTo(camMatrix, CV_32FC1);

			for (const auto& p : landmarks) {
				cv::rectangle(img2, cv::Point(cvRound(p.getX()-2.0f), cvRound(p.getY()-2.0f)), cv::Point(cvRound(p.getX()+2.0f), cvRound(p.getY()+2.0f)), cv::Scalar(255, 0, 0));
			}
			//vector<Point2f> projectedPoints;
			//projectPoints(modelPoints, rvec, tvec, camMatrix, vector<float>(), projectedPoints); // same result as below
			Mat extrinsicCameraMatrix = Mat::zeros(4, 4, CV_32FC1);
			Mat extrRot = extrinsicCameraMatrix(cv::Range(0, 3), cv::Range(0, 3));
			rotation_matrix.copyTo(extrRot);
			Mat extrTrans = extrinsicCameraMatrix(cv::Range(0, 3), cv::Range(3, 4));
			translation_vector.copyTo(extrTrans);
			extrinsicCameraMatrix.at<float>(3, 3) = 1;

			Mat intrinsicCameraMatrix = Mat::zeros(4, 4, CV_32FC1);
			Mat intrinsicCameraMatrixMain = intrinsicCameraMatrix(cv::Range(0, 3), cv::Range(0, 3));
			camMatrix.copyTo(intrinsicCameraMatrixMain);
			intrinsicCameraMatrix.at<float>(3, 3) = 1;
			
			vector<Point3f> points3d;
			for (const auto& landmark : landmarks) {
				points3d.emplace_back(mm.getShapeModel().getMeanAtPoint(landmark.getName()));
			}
			for (const auto& v : points3d) {
				Mat vertex(v);
				Mat vertex_homo = Mat::ones(4, 1, CV_32FC1);
				Mat vertex_homo_coords = vertex_homo(cv::Range(0, 3), cv::Range(0, 1));
				vertex.copyTo(vertex_homo_coords);
				Mat v2 = rotation_matrix * vertex;
				Mat v3 = v2 + translation_vector;
				Mat v3_mat = extrinsicCameraMatrix * vertex_homo;

				Mat v4 = camMatrix * v3;
				Mat v4_mat = intrinsicCameraMatrix * v3_mat;

				Point3f v4p(v4);
				Point2f v4p2d(v4p.x/v4p.z, v4p.y/v4p.z); // if != 0
				Point3f v4p_homo(v4_mat(cv::Range(0, 3), cv::Range(0, 1)));
				Point2f v4p2d_homo(v4p_homo.x/v4p_homo.z, v4p_homo.y/v4p_homo.z); // if != 0
				cv::rectangle(img2, cv::Point(cvRound(v4p2d_homo.x-2.0f), cvRound(v4p2d_homo.y-2.0f)), cv::Point(cvRound(v4p2d_homo.x+2.0f), cvRound(v4p2d_homo.y+2.0f)), cv::Scalar(255, 0, 0));
			}

			std::shared_ptr<render::Mesh> meshToDraw = std::make_shared<render::Mesh>(mm.getMean());

			const float aspect = (float)img2.cols/(float)img2.rows; // 640/480
			render::Camera camera(Vec3f(0.0f, 0.0f, 0.0f), /*horizontalAngle*/0.0f*(CV_PI/180.0f), /*verticalAngle*/0.0f*(CV_PI/180.0f), render::Frustum(-1.0f*aspect, 1.0f*aspect, -1.0f, 1.0f, /*zNear*/-0.1f, /*zFar*/-100.0f));
			render::SoftwareRenderer r(img2.cols, img2.rows, camera); // 640, 480
			//r.setModelTransform(render::utils::MatrixUtils::createScalingMatrix(1.0f/140.0f, 1.0f/140.0f, 1.0f/140.0f));
			r.setObjectToScreenTransform(intrinsicCameraMatrix * extrinsicCameraMatrix);
			r.draw(meshToDraw, nullptr);
			Mat buff = r.getImage();
			Mat buffWithoutAlpha;
			//buff.convertTo(buffWithoutAlpha, CV_BGRA2BGR);
			cvtColor(buff, buffWithoutAlpha, cv::COLOR_BGRA2BGR);
			Mat weighted = img2.clone(); // get the right size
			cv::addWeighted(img2, 0.7, buffWithoutAlpha, 0.3, 0.0, weighted);
			//return std::make_pair(translation_vector, rotation_matrix);
			img2 = weighted;
		}
		cv::imshow("win", img2);
		cv::waitKey(5);
	}

	// ---


	std::chrono::time_point<std::chrono::system_clock> start, end;
	Mat img;
	while(labeledImageSource->next()) {
		start = std::chrono::system_clock::now();
		appLogger.info("Starting to process " + labeledImageSource->getName().string());
		img = labeledImageSource->getImage();
	
		end = std::chrono::system_clock::now();
		int elapsed_mseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
		appLogger.debug("Finished face-detection. Elapsed time: " + lexical_cast<string>(elapsed_mseconds) + "ms.\n");
	
		// Log the image with the max positive of every feature
		ImageLogger appImageLogger = ImageLoggers->getLogger("app");
		appImageLogger.setCurrentImageName(labeledImageSource->getName().stem().string());
		appImageLogger.intermediate(img, doNothing, "AllFfpMaxPos");

		LandmarkCollection groundtruth = labeledImageSource->getLandmarks();

		end = std::chrono::system_clock::now();
		elapsed_mseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
		appLogger.info("Finished processing " + labeledImageSource->getName().string() + ". Elapsed time: " + lexical_cast<string>(elapsed_mseconds) + "ms.\n");
		
		//landmarkSink.add(labeledImageSource->getName().string(), faces, scores);

	}
	landmarkSink.close();

	appLogger.info("Finished!");

	return 0;
}
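
The camera loop above reads encoded frames and landmark arrays from the "CameraInputMem" shared-memory segment; the producer process is not part of this listing. A minimal sketch of what it might look like, with names assumed to match the consumer and no interprocess locking (a real implementation would add a mutex):

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/containers/vector.hpp>
#include <opencv2/opencv.hpp>
#include <vector>

namespace bip = boost::interprocess;
typedef bip::allocator<uchar, bip::managed_shared_memory::segment_manager> CharAllocator;
typedef bip::vector<uchar, CharAllocator> IpcVec;

int main()
{
    bip::managed_shared_memory shm(bip::open_or_create, "CameraInputMem", 1 << 22);
    const CharAllocator alloc(shm.get_segment_manager());
    IpcVec* out = shm.find_or_construct<IpcVec>("EncodedImage")(alloc);

    cv::VideoCapture cap(0);
    cv::Mat frame;
    std::vector<uchar> buf;
    while (cap.read(frame))
    {
        cv::imencode(".png", frame, buf);     // encode one frame
        out->assign(buf.begin(), buf.end());  // publish (unsynchronized!)
    }
    return 0;
}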
Example no. 12
//////////////////////////////////////////////////////////////////////////
//	do convolution in Frequency domain
//	@return		final image after convolution
//////////////////////////////////////////////////////////////////////////
bool cvFFTConvolution(const Mat& channel, const Mat& kernel, Mat& conv)
{
	//create complex image and kernel
	//////////////////////////////////////////////////////////////////////////
	Mat complexImg(channel.rows, channel.cols, CV_32FC2);
	vector<Mat> vec_mat(2);
	vec_mat[0] = channel.clone();
	vec_mat[1] = channel.clone();
	vec_mat[1].setTo(0);
	merge(vec_mat, complexImg);

	//////////////////////////////////////////////////////////////////////////
	Mat complexKernel(kernel.rows, kernel.cols, CV_32FC2);
	vec_mat[0] = kernel.clone();                            //real part = kernel
	vec_mat[1] = Mat::zeros(kernel.size(), kernel.type());  //imaginary part = 0
	merge(vec_mat, complexKernel);

	//////////////////////////////////////////////////////////////////////////
	/*int dft_M = getOptimalDFTSize(channel.rows*2-1);
	int dft_N = getOptimalDFTSize(channel.cols*2-1);*/

	Mat dft_A(channel.rows, channel.cols, CV_32FC2);
	dft_A.setTo(0);
	Mat dft_B(kernel.rows, kernel.cols, CV_32FC2);
	dft_B.setTo(0);

	//////////////////////////////////////////////////////////////////////////
	// do dft for image
	//////////////////////////////////////////////////////////////////////////
	Mat tmp = dft_A(Rect(0, 0, channel.cols, channel.rows));
	complexImg.copyTo(tmp);

	dft(dft_A, dft_A, CV_DXT_FORWARD);


	//////////////////////////////////////////////////////////////////////////
	// do dft for kernel
	//////////////////////////////////////////////////////////////////////////
	tmp = dft_B(Rect(0, 0, kernel.cols, kernel.rows));
	complexKernel.copyTo(tmp);

	//do dft
	dft(dft_B, dft_B, CV_DXT_FORWARD);

	//shift kernel to center
	Mat dft_BB = dft_B.clone();
	ShiftDFT(dft_B, dft_BB);
	dft_BB.copyTo(dft_B);

	//////////////////////////////////////////////////////////////////////////
	//	do convolution
	//////////////////////////////////////////////////////////////////////////
	mulSpectrums(dft_A, dft_B, dft_A, CV_DXT_MUL_CONJ);

	//do inverse dft
	dft(dft_A, dft_A, CV_DXT_INV_SCALE, channel.rows);

	split(dft_A, vec_mat);
	//magnitude = sqrt(re^2 + im^2); the original applied pow(...,2) again
	//where the square root belongs
	pow(vec_mat[0], 2, vec_mat[0]);
	pow(vec_mat[1], 2, vec_mat[1]);
	vec_mat[0] += vec_mat[1];
	sqrt(vec_mat[0], vec_mat[0]);

	//copy back
	vec_mat[0].copyTo(conv);

	return true;
}
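A minimal usage sketch for cvFFTConvolution (an assumption, not from the original source). mulSpectrums() requires both spectra to have the same size, so the kernel is embedded in a zero image matching the input:

Mat img = imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat channel;
img.convertTo(channel, CV_32F, 1.0 / 255.0);

//pad a 21x21 Gaussian patch into a kernel image the same size as the input
Mat kernel = Mat::zeros(channel.size(), CV_32F);
Mat g = getGaussianKernel(21, 3.0, CV_32F);
Mat g2d = g * g.t();
g2d.copyTo(kernel(Rect(0, 0, 21, 21)));

Mat conv;
cvFFTConvolution(channel, kernel, conv);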
Esempio n. 13
std::vector<KeyPoint> findPaper (Mat frame, double avgBright) {
  Mat colors[3];
  Mat colored;
  Mat copy;
  
  frame.copyTo(colored);
  cvtColor(colored,colored,CV_BGR2HSV);
  split(frame,colors);
  copy = frame;
   
  cvtColor(frame,frame,CV_BGR2GRAY);
  GaussianBlur(frame,frame,Size(3,3),0);

   
  for(int parameter = 10; parameter > 0; parameter--) {
   
    double offset = 1;
    std::vector<KeyPoint> points;
    std::vector<KeyPoint> allPoints;
    FAST(frame,allPoints,parameter);

    do {      
      points = std::vector<KeyPoint>(allPoints);
      std::vector<int> keep(points.size());
      for (std::vector<KeyPoint>::iterator it = points.begin(); it != points.end(); it++) {
	KeyPoint pt = *it;
	Vec3b at = colored.at<Vec3b>(pt.pt);
	int numRed = 0;
	int numWhite = 0;
	 int dr[8] = { 0, -1,-1, -1, 0,  1, 1, 1};
	 int dc[8] = { 1, 1,  0, -1,-1,-1, 0, 1};
	 std::vector<int> deltaRow (dr, dr+sizeof(dr)/sizeof(dr[0]));
	 std::vector<int> deltaCol (dc, dc+sizeof(dc)/sizeof(dc[0]));
  	  
	 for(unsigned int i = 0; i < deltaRow.size(); i++) {
	   KeyPoint delta = pt;
	   int rowAdd = offset;
	   int colAdd = offset;
	  
	   delta.pt.x = pt.pt.x + (int) (deltaRow[i] * rowAdd);
	   delta.pt.y = pt.pt.y + (int) (deltaCol[i] * colAdd);
	   if(delta.pt.x < 0 || delta.pt.y < 0 || delta.pt.x >= frame.cols || delta.pt.y >= frame.rows) {
	     continue;
	   }
	   int colorCode = 0;
	  Vec3b neighbor = colored.at<Vec3b>(delta.pt);
	  int count = 0;
	  for(int i = 0; i < 3; i++) {
	    if((int)neighbor[i] == 255){count++;} 
	  }
	  
	  
	  if(count >= 2){
	    continue;
	  }
	  
	  if((neighbor[0] > LOW_RED || neighbor[0] < HIGH_RED) && neighbor[1] > 73 && neighbor[2] > 50) {
	    numRed++;
	    colorCode = 1;
	  } else if( neighbor[2] > avgBright) {
	    numWhite++;
	    colorCode = 2;
	  }
	  
	  if(colorCode == 0) {
	    colored.at<Vec3b>(delta.pt)[0]= 3;
	    colored.at<Vec3b>(delta.pt)[1] = 100;
	    colored.at<Vec3b>(delta.pt)[2] = 50;
	  } else if (colorCode == 1) {
	    colored.at<Vec3b>(delta.pt)[0]= 100;
	    colored.at<Vec3b>(delta.pt)[1] = 255;
	    colored.at<Vec3b>(delta.pt)[2] = 255;
	  } else if (colorCode == 2) {
	    colored.at<Vec3b>(delta.pt)[0]= 10;
	    colored.at<Vec3b>(delta.pt)[1] = 255;
	    colored.at<Vec3b>(delta.pt)[2] = 255;
	    
	  }
	}  
	if(numRed != 5 || numWhite != 3) {
	  keep[it - points.begin()] = -1;
	}
      }
      
      int size = points.size();
      int trueIndex = 0;
      int loopIndex = 0;
      
      while(trueIndex < size) {
	if(keep[trueIndex] == -1) {
	  points.erase(points.begin()+loopIndex);
	  
	  trueIndex++;
	} else {
	  loopIndex++;
	  trueIndex++;
	}
      }
      offset++;
    } while(points.size() != 4 && offset < 20);
    if(points.size() == 4){return points;}
  }

  //no acceptable corner set found at any FAST threshold
  return std::vector<KeyPoint>();
}
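A hypothetical call site for findPaper (LOW_RED and HIGH_RED are assumed to be HSV hue bounds defined alongside the function, e.g. for the red wrap-around):

VideoCapture cap(0);
Mat frame, hsv;
cap >> frame;
cvtColor(frame, hsv, CV_BGR2HSV);
double avgBright = mean(hsv)[2];  //mean of the V channel as a rough brightness estimate
std::vector<KeyPoint> corners = findPaper(frame, avgBright);
if (corners.size() == 4)
    std::cout << "paper candidate found" << std::endl;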
Esempio n. 14
//----------------------------------------------------------------------------------------------------------
void RunTracking::trackFilteredObject(TrackedPiece &piece, Mat &cameraFeed, Mat &threshold_image){

	vector <TrackedPiece> pieces;

	Mat temp;
	threshold_image.copyTo(temp);
	//these two vectors needed for output of findContours
	vector< vector<Point> > contours;
	vector<Vec4i> hierarchy;
	//find contours of filtered image using openCV findContours function
	findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
	//use moments method to find our filtered object
	bool objectFound = false;
	if (hierarchy.size() > 0) {
		int numObjects = hierarchy.size();
//		cout << "Num objects: " << numObjects << endl;
//		cout << "Max Num objects: " << MAX_NUM_OBJECTS << endl;
		// threshold used by the (currently disabled) movement check below
		const int thresh = 40;
		//saves max area of each contour detected so only the largest one will be tracked
		double maxArea = 0;
		// temporary piece for contours found
		TrackedPiece tmp;

		//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
		if(numObjects < MAX_NUM_OBJECTS){
			// for each object (contour) detected
			for (int index = 0; index >= 0; index = hierarchy[index][0]) {

				// get the moment of the contour
				Moments moment = moments((cv::Mat)contours[index]);
				// get the area from the moment
				double area = moment.m00;
//				cout << "Area " << index << " is: " << area << endl;
				
				// if the area is less than MIN_OBJECT_AREA then it is probably just noise;
				// it must also be larger than the max area found so far, since we only want the largest object.
				if(area > MIN_OBJECT_AREA && area > maxArea){
					// set new max area
					maxArea = area;
					// Clear previous objects found so only one (the biggest) is detected
					pieces.clear();

					int xPos = moment.m10/area;
					int yPos = moment.m01/area;
					
					tmp.setXPos(xPos);
					tmp.setYPos(yPos);
					tmp.setName(piece.getName());
					tmp.setColor(piece.getColor());

					//cout << piece.getName() << ": x: " << xPos << " y: " << yPos << endl;
					//cout << "LastPos: x: " << piece.getLastxPos() << " y: " << piece.getLastyPos() << endl;

					pieces.push_back(tmp);

					objectFound = true;
				}
			}

			//let user know you found an object and check for movement
			if(objectFound == true){

				// Update piece location (tmp piece should now be biggest contour found)
				piece.setXPos(tmp.getXPos());
				piece.setYPos(tmp.getYPos());

				/*
				 * Movement checking moved to timerTick
				 *
				// Check for movement (tmp piece should now be biggest contour found)
				if(tmp.getXPos() > (piece.getLastxPos() + thresh) || tmp.getXPos() < (piece.getLastxPos() - thresh))
				{
					piece.setLastxPos(tmp.getXPos());
					cout << piece.getName() << ": X movement" << endl;
				}
				if(tmp.getYPos() > (piece.getLastyPos() + thresh) || tmp.getYPos() < (piece.getLastyPos() - thresh))
				{
					piece.setLastyPos(tmp.getYPos());
					cout << piece.getName() << ": Y movement." << endl;
				}
				*/

				//draw object location on screen
				drawObject(pieces,cameraFeed);
			}

		}else putText(cameraFeed,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
	}
}
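For context, a hypothetical per-frame pipeline around trackFilteredObject(); the HSV-range getters getHSVmin()/getHSVmax() are assumptions, not part of the class shown above:

VideoCapture capture(0);
Mat cameraFeed, hsv, thresholded;
RunTracking tracking;              //assumed default-constructible
TrackedPiece piece;                //configured elsewhere (name, color, HSV range)
capture >> cameraFeed;
cvtColor(cameraFeed, hsv, CV_BGR2HSV);
inRange(hsv, piece.getHSVmin(), piece.getHSVmax(), thresholded); //hypothetical getters
tracking.trackFilteredObject(piece, cameraFeed, thresholded);
imshow("tracking", cameraFeed);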
	//Hue callback is not currently used
  void hueCallback(const sensor_msgs::ImageConstPtr& msg_ptr) {

    try {
      ptr_hue = cv_bridge::toCvCopy(msg_ptr, "8UC1");
      //imshow(ptr_hue->image);
      ptr_hue->image.copyTo(*hue_image);
    } catch (cv_bridge::Exception& e) {
      ROS_ERROR("cv_bridge exception: %s", e.what());
    }
		
    int rangeMin = (mean_color - window_size)%255;
    int rangeMax = (mean_color + window_size)%255;
    //int otherRangeMin = (other_mean_color - window_size)%255;
    //int otherRangeMax = (other_mean_color + window_size)%255;

    if(rangeMin > rangeMax){
      int temp = rangeMax;
      rangeMax = rangeMin;
      rangeMin = temp; //the original assigned rangeMax back to rangeMin, losing the swap
    }
/*
    if(otherRangeMin > otherRangeMax){
      int temp = otherRangeMax;
      otherRangeMax = otherRangeMin;
      otherRangeMin = temp;
    }
*/
    cout << "range: " << rangeMin << " " << rangeMax << endl;
    inRange(*hue_image, Scalar((double)rangeMin),Scalar((double)rangeMax), *back_img);
    *color_cc_image = Scalar(0);
    back_img->copyTo(*hue_image);
    Size ksize = Size(2 * blurKernel + 1,2 * blurKernel + 1);
    GaussianBlur(*back_img, *blurred_image, ksize, -1, -1);

    //attempts at adaptive thresholding

    //adaptiveThreshold(*blurred_image, *temp_blurred_image, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 3, .5);
    //threshold(*blurred_image, *temp_blurred_image, THRESH_OTSU, 255, THRESH_BINARY); 

    threshold(*blurred_image, *temp_blurred_image, 110, 255, THRESH_BINARY); 
    convertScaleAbs(*temp_blurred_image, *back_img, 1, 0);
    hue_image->copyTo(*copy_image);

    if (display_image_){
      imshow(color_topic, *back_img);
    }

    getConnectedComponents();

    //Find Connected Components
    //I added this-it used to be just getConnectedComponent
/*
    findContours(*back_img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point());
    if(!contours.empty())
      numCC = getThresholdConnectedComponents();

    if (contours.empty()){
      inRange(*hue_image, Scalar((double)otherRangeMin),Scalar((double)otherRangeMax), *back_img);
      *color_cc_image = Scalar(0);
      back_img->copyTo(*hue_image);
      Size ksize = Size(2 * blurKernel + 1,2 * blurKernel + 1);
      GaussianBlur(*back_img, *blurred_image, ksize, -1, -1);
      Mat* sat_image;
      threshold(*blurred_image, *temp_blurred_image, 110, 255, THRESH_BINARY); 
      convertScaleAbs(*temp_blurred_image, *back_img, 1, 0);
      hue_image->copyTo(*copy_image);
      findContours(*back_img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point());
      if(!contours.empty())
        numCC = getThresholdConnectedComponents();
    }
*/		
    for (int i = 0; (i < (int)comps.size()) && (comps[i].idx >= 0) && (comps[i].idx < (int)contours.size()); i++) {
      Scalar color( (rand()&255), (rand()&255), (rand()&255) );
      drawContours(*color_cc_image, contours, comps[i].idx, color, 3, 8, hierarchy,0, Point());
      drawContours(*hue_image, contours, 0, comps[i].idx, 3, 8, hierarchy,0, Point());
    }

    getMoments();

    blobTracker->updateKalmanFiltersConnectedComponents();
    if (numCC > 0)
      blobTracker->getFilteredBlobs(true);
    else
      blobTracker->getFilteredBlobs(false);
    //cv::imshow("cam",hue_image);
    //Draw Filtered Blobs
    
    RotatedRect box;
    Point pt;
    //Image_msgs::FeatureMoments moments;
    //moments.num = 0;
    for(int i=0; i<MAX_FEATURES; i++)
      {
	FeatureBlob ftemp;
	blobTracker->filters_[i].getFiltered(ftemp);
	//cout << "valid? " << ftemp.getValid() << endl;
	if(ftemp.getValid() && display_image_)
	  {
	    //cout << "Should be displaying something!" << endl;
	    Mat firstTemp, secondTemp;
	    ftemp.getValues(firstTemp, secondTemp);
	    pt.x = firstTemp.at<float>(0,0); pt.y = firstTemp.at<float>(1,0);
	    blobTracker->getBoxFromCov(pt, secondTemp, box);

	    if (box.size.width > 0 && box.size.height > 0 && box.size.width < width && box.size.height < height)
	      {
		//ellipse(*rgb_image, box, CV_RGB(0,255,255), 3, 8);
		//circle(*rgb_image, pt, 3, CV_RGB(255, 255, 255), -1, 8);
	      }	    

	  }
      }
    //image_pub_.publish(moments);
    if (display_image_) {
      imshow("Color Blobs", *rgb_image);
      imshow("Connected Components", *color_cc_image);
    }
    waitKey(2);
  }
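The callback above would be wired to an image topic roughly as follows; the topic name and the enclosing class name (BlobTrackerNode) are assumptions:

//Inside the enclosing node class (here called BlobTrackerNode, a guess):
ros::NodeHandle nh;
ros::Subscriber hue_sub =
    nh.subscribe("camera/hue_image", 1, &BlobTrackerNode::hueCallback, this);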
Esempio n. 16
 void init(Mat _intrinsics, Mat _distCoeffs)
 {
     _intrinsics.copyTo(intrinsics);
     _distCoeffs.copyTo(distortion);
 }
Esempio n. 17
int main(int argc, const char* argv[])
{

	//Read command line inputs to determine how the program will execute
	ProgParams params;
	parseCommandInputs(argc, argv, params);

	//start mjpeg stream thread
	pthread_create(&MJPEG, NULL, VideoCap, &params);

	//Create Local Processing Image Variables
	Mat img, thresholded, output;


	//initialize state flags so the processing loop stays idle until the stream is ready
	targets.matchStart = false;
	targets.validFrame = false;
	targets.hotLeftOrRight = 0;
	progRun = false;

	struct timespec start, end;

	//run loop forever
	while (true)
	{
		//check if program is allowed to run
		//this bool, is enabled by the mjpeg thread
		//once it is up to 10fps

		if (params.Process && progRun)
		{
			//start clock to determine our processing time;
			clock_gettime(CLOCK_REALTIME, &start);

			pthread_mutex_lock(&frameMutex);
			if (!frame.empty())
			{
				frame.copyTo(img);
				pthread_mutex_unlock(&frameMutex);

				thresholded = ThresholdImage(img);

				//Lock Targets and determine goals
				pthread_mutex_lock(&targetMutex);
				findTarget(img, thresholded, targets, params);
				CalculateDist(targets);

				if(params.Debug)
				{
					cout<<"Vert: "<<targets.VertGoal<<endl;
					cout<<"Horiz: "<<targets.HorizGoal<<endl;
					cout<<"Hot Goal: "<<targets.HotGoal<<endl;
					cout<<"Dist:" <<targets.targetDistance<<endl<<endl;
				}
				pthread_mutex_unlock(&targetMutex);

				clock_gettime(CLOCK_REALTIME, &end);

				if(params.Timer)
					cout << "It took " << diffClock(start,end) << " seconds to process frame \n";


			}
			else
			{
				//frame was empty: release the lock taken above exactly once
				//(the original unlocked here unconditionally, double-unlocking
				//after a successful copy)
				pthread_mutex_unlock(&frameMutex);
			}

			if(params.Visualize)
				waitKey(5);

		}

		usleep(1000); //sleep ~1ms between iterations so the loop does not spin
	}

	//if we end the process code, wait for threads to end
	pthread_join(MJPEG, NULL);

	//done
	return 0;

}
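One plausible definition of the diffClock() helper used above (an assumption; the original implementation is not shown here). It returns elapsed seconds as a double:

#include <ctime>

double diffClock(const timespec &start, const timespec &end)
{
	return (end.tv_sec - start.tv_sec) +
	       (end.tv_nsec - start.tv_nsec) / 1e9;
}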
Esempio n. 18
void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, int minInliersCount,
                        OutputArray _inliers, int flags)
{
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    
    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
    
    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);
    Mat rvec = _rvec.getMat();
    Mat tvec = _tvec.getMat();
    
    Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
    
    if (minInliersCount <= 0)
        minInliersCount = objectPoints.cols;
    cv::pnpransac::Parameters params;
    params.iterationsCount = iterationsCount;
    params.minInliersCount = minInliersCount;
    params.reprojectionError = reprojectionError;
    params.useExtrinsicGuess = useExtrinsicGuess;
    params.camera.init(cameraMatrix, distCoeffs);
    params.flags = flags;
    
    vector<int> localInliers;
    Mat localRvec, localTvec;
    rvec.copyTo(localRvec);
    tvec.copyTo(localTvec);
    
    if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
    {
        parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
                                                                               localRvec, localTvec, localInliers));
    }
    
    if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
    {
        if (flags != CV_P3P)
        {
            int i, pointsCount = (int)localInliers.size();
            Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
            for (i = 0; i < pointsCount; i++)
            {
                int index = localInliers[i];
                Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
                imagePoints.col(index).copyTo(colInlierImagePoints);
                Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
                objectPoints.col(index).copyTo(colInlierObjectPoints);
            }
            solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
        }
        localRvec.copyTo(rvec);
        localTvec.copyTo(tvec);
        if (_inliers.needed())
            Mat(localInliers).copyTo(_inliers);
    }
    else
    {
        tvec.setTo(Scalar(0));
        Mat R = Mat::eye(3, 3, CV_64F);
        Rodrigues(R, rvec);
        if( _inliers.needed() )
            _inliers.release();
    }
    return;
}
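A usage sketch for the overload above (OpenCV 2.x-era signature); the intrinsics are hypothetical placeholder values, and the point vectors are assumed to be filled from a model and its 2D detections:

vector<Point3f> objectPoints;   //3D model points (CV_32F), filled elsewhere
vector<Point2f> imagePoints;    //matching 2D detections
Mat cameraMatrix = (Mat_<double>(3, 3) << 800, 0, 320,
                                            0, 800, 240,
                                            0,   0,   1);
Mat distCoeffs = Mat::zeros(4, 1, CV_64F);
Mat rvec, tvec;
vector<int> inliers;
solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
               rvec, tvec, false, 100, 8.0, 0, inliers, CV_ITERATIVE);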
Esempio n. 19
position DetectColor(string color, int sizeC)
{
  //  std::cout << "detector_Color" << std::endl;
    std::vector<std::vector<cv::Point> > contours;
    position coordinate;
    coordinate.x = 0;
    coordinate.z = 0;
    RNG rng(12345);
    std::vector<cv::Vec4i> hierarchy;
    Point2f MaxRegCenter;
    Mat temimg;
    globalImage.copyTo(temimg);

    if (temimg.size().width != 0)
    {
        int iLowH, iLowS, iLowV, iHighH, iHighS, iHighV;
        if (color == "red")
        {
            iLowH = 0;
            iLowS = 177;
            iLowV = 237;
            iHighH = 0;
            iHighS = 255;
            iHighV = 255;
        }
        else if (color == "blue")
        {
            //light blue
            // ROS_INFO("blue");
            // iLowH = 98;
            // iLowS = 50;
            // iLowV = 80;
            // iHighH = 137;
            // iHighS = 255;
            // iHighV = 255;

            //dark blue
            iLowH = 98;
            iLowS = 100;
            iLowV = 175;
            iHighH = 111;
            iHighS = 199;
            iHighV = 254;


            // //dark blue
            // iLowH = 103;
            // iLowS = 120;
            // iLowV = 175;
            // iHighH = 111;
            // iHighS = 199;
            // iHighV = 254;

            // iLowH = 98;
            // iLowS = 50;
            // iLowV = 80;
            // iHighH = 111;
            // iHighS = 255;
            // iHighV = 254;

        }
        else if (color == "yellow")
        {
            ROS_INFO("yelloqw");
            iLowH = 25;
            iLowS = 34;
            iLowV = 101;
            iHighH = 40;
            iHighS = 254;
            iHighV = 254;
        }
        else if ((color == "black"))
        {
            iLowH = 104;
            iLowS = 0;
            iLowV = 0;
            iHighH = 104;
            iHighS = 9;
            iHighV = 80;
        }
        else if ((color == "green"))
        {

        //light green
            // iLowH = 41;
            // iLowS = 52;
            // iLowV = 170;
            // iHighH = 85;
            // iHighS = 225;
            // iHighV = 224;
        //dark green
            iLowH = 41;
            iLowS = 41;
            iLowV = 98;
            iHighH = 102;
            iHighS = 253;
            iHighV = 203;
        //green
            // iLowH = 41;
            // iLowS = 41;
            // iLowV = 98;
            // iHighH = 102;
            // iHighS = 253;
            // iHighV = 223;
        }

        Mat imgHSV;

        Mat imgThresholded = Mat::zeros( globalImage.size(), CV_8UC3 );

        ROS_INFO("detectcolo4");
        int thresh = 100;
        int max_thresh = 255;
        cvtColor(temimg, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
        ROS_INFO("slam2");


        inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image
        //morphological opening (removes small objects from the foreground)


        Mat medianImg;
        medianBlur(imgThresholded, medianImg, 21);

        imshow("medianImg", medianImg);
        waitKey(10);

        Canny( medianImg, medianImg, thresh, thresh * 2, 3 );

        cv::findContours( medianImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
        //  cv::drawContours(globalImage, contours, -1, Scalar::all(255), CV_FILLED);
        // ROS_INFO("570");
        // std::cout<<globalImage.size().width<<" width "<<globalImage.size().height<<" height "<<imgThresholded.size().width<<" width "<<imgThresholded.size().height<<" heith "<<std::endl;
        // Mat resultimg = globalImage & imgThresholded;
        // cvNamedWindow( "object_position", 1);
        // cv::imshow("object_position",imgThresholded);



        ////////////////////>>>>>>>>>>> accecing contours <<<<<<<<<<<<<<<<<<<<<<

        //// >>>>>>>> find centers
        /// Get the moments
        if (contours.size() != 0)
        {

            vector<Moments> mu(contours.size() );
            for ( int i = 0; i < contours.size(); i++ )
            {
                mu[i] = moments( contours[i], false );
            }

            ///  Get the mass centers:
            vector<Point2f> centers( contours.size() ); // mc -> centers
            for ( int i = 0; i < contours.size(); i++ )
            {
                centers[i] = Point2f( mu[i].m10 / mu[i].m00 , mu[i].m01 / mu[i].m00 );
            }

            /// Draw contours
            Mat drawing = Mat::zeros( imgThresholded.size(), CV_8UC3 );

            /// Calculate the area with the moments 00 and compare with the result of the OpenCV function

            const float bad_point = std::numeric_limits<float>::quiet_NaN();
            float MaxArea = -1;
            int MaxAreaIdx = -1;
            for ( int i = 0; i < contours.size(); i++ )
            {
                //printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
                // if (contourArea(contours[i]) > 100)
                // {
                if ( contourArea(contours[i]) > MaxArea)
                {
                    MaxArea = contourArea(contours[i]);
                    MaxAreaIdx = i;
     
                    // Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
                    // drawContours( imgThresholded, contours, i, color, 2, 8, hierarchy, 0, Point() );
                    // circle( imgThresholded, centers[i], 4, color, -1, 8, 0 );
                }

                //}
            }
            //Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
            //drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
            //circle( drawing, mc[i], 4, color, -1, 8, 0 );
            if (MaxAreaIdx != -1)
            {
                //index with the contour index, not the (float) max area value
                MaxRegCenter = centers[MaxAreaIdx];
            }
            if (MaxAreaIdx != -1)
            {
                if (contours[MaxAreaIdx].size() > sizeC)
                {

                    ROS_INFO("contoursize %d",sizeC);
                    // Mat drawing = Mat::zeros(imgThresholded.size(), CV_8UC3 );
                    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
                    drawContours( drawing, contours, MaxAreaIdx, color, 2, 8, hierarchy, 0, Point() );
                    circle( drawing, centers[MaxAreaIdx], 4, color, -1, 8, 0 );
                    coordinate = calcPosition(contours[MaxAreaIdx]);
                    std::cout<<coordinate.x<<" coordinatex "<<coordinate.y<<" coordinatey "<<coordinate.z<<" coordinate.z "<<endl;
                }
            }

            if (drawing.size().width != 0)
            {
                imshow( "Contours2", drawing );
                waitKey(10);
            }
            boost::this_thread::sleep(boost::posix_time::milliseconds(1000));
            cout << "fshow_countours2" << endl;
        }
    }
    return coordinate;

}
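A hypothetical call, assuming globalImage already holds the latest frame and that position::x/z are floating point (50 is an example minimum contour length):

position p = DetectColor("blue", 50);
if (p.x != 0 || p.z != 0)
    ROS_INFO("blue object at x=%.2f z=%.2f", p.x, p.z);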
int main(int argc, char* argv[])
{

	//-------------------------------------------------------------------image enhancement tests
	//Mat orignal, contrast, histogram, grey;

	//orignal = imread("C:\\Users\\user212\\Documents\\Visual Studio 2015\\Projects\\projectTestOpenCV\\projectTestOpenCV\\test\\test_image_enhacement_opreationsimg.jpg", CV_LOAD_IMAGE_COLOR);

	//if (orignal.empty())
	//{
	//	std::cout << "!!! Failed imread(): image not found" << std::endl;
	//	// don't let the execution continue, else imshow() will crash.
	//}

	//

	////contrast adjustment
	//	orignal.convertTo(contrast,-1,2.2,50);

	//	if (contrast.empty())
	//	{
	//		std::cout << "!!! Failed to convertto(): image not found" << std::endl;
	//		// don't let the execution continue, else imshow() will crash.
	//	}


	////histogram adjustment

	//	/// Convert to grayscale
	//	cvtColor(orignal, grey, CV_BGR2GRAY);

	//	/// Apply Histogram Equalization
	//	equalizeHist(grey, histogram);



	//namedWindow("Original Image", 0);
	//namedWindow("Contrast adjustments", 0);
	//namedWindow("Histogram adjustments", 0);

	//while (1) {
	//	
	//	imshow("Original Image", orignal);
	//	imshow("Contrast adjustments", contrast);
	//	imshow("Histogram adjustments", histogram);

	//	switch (waitKey(10))
	//	{
	//		case 32:
	//			//capture image at space bar
	//			//imwrite("test\\/orignal1.jpg", orignal);
	//			//imwrite("test\\/contrast1.jpg", contrast);
	//			imwrite("test\\/histogram1.jpg", histogram);

	//			break;

	//		case 27:
	//				//exit at escape
	//				return 0;
	//	}
	//}


	//------------------------------------------------------------------- image segmentation + init camera

//
//	//global variables  
//	Mat frame /*camera feed*/, res /*processed image*/, mask /*mask*/;
//
//	Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor  
//	Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor  
//	Ptr<BackgroundSubtractorGMG> pGMG; //MOG2 Background subtractor  
//
//	//pMOG = new BackgroundSubtractorMOG(100,5,0,1);
//	pMOG2 = new BackgroundSubtractorMOG2(1000,0,1);
//	//pGMG = new BackgroundSubtractorGMG();
//	
//	//for reading saved file
//	//char fileName[100] = "C:\\opencv\\/myVideo.avi"; //Gate1_175_p1.avi"; //mm2.avi"; //";//_p1.avi";
//	//VideoCapture stream1(fileName);
//
//	//camera initializer
//	VideoCapture cap;
//	cap.open(0);
//
//	//unconditional loop     
//	while (true) {
//
//		cap >> frame;
//		//contrast adjustment+brightness
//		//frame.convertTo(frame, -1, 0.5,0);
//
//
//
//		res = frame;
//
//		/////////////////////////////////////////using MOG -> constructor(set_input_frame, get_output_mask_here, learning_rate)
//		//pMOG->operator()(frame, mask, 0.8);
//		////enhancment of mask
//		//Mat histogram, grey;
//		///// Convert to grayscale
//		////cvtColor(frame, grey, CV_BGR2GRAY);
//
//		///// Apply Histogram Equalization
//		//equalizeHist(mask, histogram);
//
//		////test for channels in image matrix
//		////std::cout << mask.channels() << endl;
//		////std::cout << histogram.channels() << endl;
//
//		/////////////////////////////////////////using MOG2 -> constructor(set_input_frame, get_output_mask_here, learning_rate)
//		pMOG2->operator()(frame,mask,0.6);
//		
//			//enhancment of mask
//			Mat mask_histogram,grey,mask_salt,res_histogram, res_salt;
//			//Convert to grayscale
//			//cvtColor(frame, grey, CV_BGR2GRAY);
//
//			//Apply Histogram Equalization
//			equalizeHist(mask, mask_histogram);
//			
//			//salt pepper removal using median blurring
//			medianBlur(mask, mask_salt, 5);
//
//			//test for channels in image matrix
//			//std::cout << mask.channels() << endl;
//			//std::cout << histogram.channels() << endl;
//
//		/////////////////////////////////////////using GMG/////////////////////////////////////////
//		//pGMG->operator()(frame, mask);
//
//		//imshow("frame", frame);
//		//imshow("mask", mask);
//		//imshow("histogram enhanced mask", mask_histogram);
//
//		imshow("salt", mask_salt);
//
//		//????????????????? here frame from previous 10th last frame needs to be copied using max to current frame??????????????????
//		frame.copyTo(res, mask);
//		
//		frame.copyTo(res_histogram, mask_histogram);
//		frame.copyTo(res_salt, mask_salt);
//
//		/*imshow("No image enhancment", res);
//		imshow("Histogram Equalized", res_histogram);
//		imshow("Salt fixed", res_salt);
//*/
//		//listen for 10ms for a key to be pressed
//		switch (waitKey(10))
//		{
//			case 32:
//
//				//capture image at space bar
//				imwrite("captured_image\\/unprocessed.jpg", frame);
//				imwrite("captured_image\\/mask with histogram enhanced.jpg", mask_histogram);
//				imwrite("captured_image\\/mask with salt removed.jpg", mask_salt);
//				imwrite("captured_image\\/processed.jpg", res);
//
//				imshow("captured", res);
//				imshow("captured", mask_salt);
//
//				break;
//
//			case 27:
//				//exit at esc
//				return 0;
//		}
//
//	}
//


/////////////////////////////////////////////////////////////////background model calculation


//global variables  
	Mat frame /* camera feed */, 
		processed_image /* processed image */,
		initial_mask /* initial mask returned by the MOG background subtractor */,
		histogram_mask /* mask after histogram equalization */,
		noise_free_mask /* mask after removal of salt-and-pepper noise */,
		history[100],
		res
	;

	int counter_history = 0; /*sliding window history counter*/

//MOG Background subtractor initialization
//constructor parameters : history size, number of Gaussian mixtures, background ratio, noise strength
	/*Ptr<BackgroundSubtractor> pMOG; 
	pMOG = new BackgroundSubtractorMOG(100,5,0,1);*/

//MOG2 Background subtractor initialization
//constructor parameters : history size, threshold value, shadow detection 
	Ptr<BackgroundSubtractor> pMOG2; 
	pMOG2 = new BackgroundSubtractorMOG2(300,0, 0);

//camera initializer
	VideoCapture cap;
	cap.open(0);

	//sliding window history logic
	while (1) {
		
		//transfer feed to pre-intialized matrix
		cap >> frame;
		
		//image enhancement of gain (contrast) and bias (brightness) adjustments
		//constructor : resulting (enhanced) image, rtype (-1 keeps the source depth), alpha/gain, beta/bias
		//frame.convertTo(frame, -1, 0.5, 0);

		//make a copy of frame for later comparison
		frame.copyTo(res);
		
		//exit if the buffer exceeds 100 frames
		if (counter_history == 100) {
			std::cout << "Application timed out: history buffer reached " << counter_history << " frames." << endl;
			return 0;
		}
		
		//make a copy of frame for the sliding-window history
		waitKey(1); //small delay to let the GUI event loop run
		frame.copyTo(history[counter_history]);
		
		//wait until the 10th frame is obtained, so the history array is initialized
		if (counter_history > 10) {
			
			//output frame number for debugging purposes
			std::cout << "Frame feed No. " << counter_history << endl;
		
			////MOG implementation
			////constructor : input image, resulting mask, learning_rate
			//	pMOG->operator()(frame, initial_mask, 0.8);
			//
			//	//enhancement of mask using histogram equalization - constructor : input image (1 channel), output image (1 channel) 
			//	//Apply Histogram Equalization
			//	equalizeHist(initial_mask, histogram_mask);

			//	//noise reduction of salt and pepper using median blurring
			//	//constructor - input image (1 channel), output image (1 channel), k size (?)
			//		medianBlur(initial_mask, noise_free_mask, 5); //using input without histogram initialization
			//		//medianBlur(histogram_mask, noise_free_mask, 5); //using input with histogram initialization

			//	//test for checking channels in image matrix
			//	////std::cout << initial_mask.channels() << endl;
			//	////std::cout << histogram_mask.channels() << endl;

			//MOG2 implementation (image segmentation)
			//constructor : input image, resulting mask, learning_rate
			pMOG2->operator()(frame, initial_mask, 0.2);

			//enhancement of mask using histogram equalization
			//constructor : input image (1 channel), output image (1 channel)
			equalizeHist(initial_mask, histogram_mask);

			//noise reduction of salt and pepper using median blurring
			//parameters - input image (1 channel), output image (1 channel), ksize (odd aperture size)
			medianBlur(initial_mask, noise_free_mask, 5); //using input without histogram equalization
			//medianBlur(histogram_mask, noise_free_mask, 5); //using input with histogram equalization

			//test for channels in image matrix
			//std::cout << mask.channels() << endl;
			//std::cout << histogram.channels() << endl;
		
			//Morphological operations
				//dilate()/erode() expect a structuring element, not the
				//MORPH_* shape constant, so build one explicitly
				Mat element = getStructuringElement(MORPH_CROSS, Size(3, 3));

				//dilation - input image (1 channel), dilated image (1 channel)
				dilate(noise_free_mask, noise_free_mask, element);

				//erosion - input image (1 channel), eroded image (1 channel)
				erode(noise_free_mask, noise_free_mask, element);

			//Alpha-channel style transparency blend of the mask, for smoothing
				//alpha is the transparency value of the simulated alpha channel
				//reconsider this?????
				/*double alpha=0.05, beta;
				beta = (0.5 - alpha);
				addWeighted(frame, alpha, history[counter_history - 10], beta, 0.0, history[counter_history - 10]);*/

			//Overwrite last frame from sliding window using generated mask of segmentation
			history[counter_history - 10].copyTo(frame ,noise_free_mask);
			
			//output results
			imshow("Original Image", res);
			imshow("Final processed image", frame);
			imshow("Final mask", noise_free_mask);
		}

		//event handling on preview
		switch (waitKey(10))
		{
			//capture image at space bar
			case 32:

				//freeze output window from feed
				imshow("Original Image", res);
				imshow("Final processed image", frame);
				imshow("Final mask", noise_free_mask);
				std::cout << "Image written from feed. Frame no. " << counter_history << endl;

				imwrite("test\\captured_image\\original image.jpg", res);
				imwrite("test\\captured_image\\final processed image.jpg", frame);
				imwrite("test\\captured_image\\final mask.jpg", noise_free_mask);

				break;

			//exit at esc
			case 27:
				return 0;
		}

		//increment frame count
		counter_history++;
	}


//disabled demo loop for the raw webcam feed (condition is false, so it never runs)
	while (false) {

		cap >> frame;

		//image enhancement of gain (contrast) and bias (brightness) adjustments
		//constructor : resulting (enhanced) image, rtype (-1 keeps the source depth), alpha/gain, beta/bias
		frame.convertTo(frame, -1, 0.5,0);

		////MOG implementation
		////constructor : input image, resulting mask, learning_rate
		//	pMOG->operator()(frame, initial_mask, 0.8);
		//
		//	//enhancement of mask using histogram equalization - constructor : input image (1 channel), output image (1 channel) 
		//	//Apply Histogram Equalization
		//	equalizeHist(initial_mask, histogram_mask);

		//	//noise reduction of salt and pepper using median blurring
		//	//constructor - input image (1 channel), output image (1 channel), k size (?)
		//		medianBlur(initial_mask, noise_free_mask, 5); //using input without histogram initialization
		//		//medianBlur(histogram_mask, noise_free_mask, 5); //using input with histogram initialization

		//	//test for checking channels in image matrix
		//	////std::cout << initial_mask.channels() << endl;
		//	////std::cout << histogram_mask.channels() << endl;

		//MOG2 implementation
		//constructor : input image, resulting mask, learning_rate
			pMOG2->operator()(frame, initial_mask,0.6);
		
			//enhancement of mask using histogram equalization
			//constructor : input image (1 channel), output image (1 channel)
				equalizeHist(initial_mask, histogram_mask);

			//noise reduction of salt and pepper using median blurring
			//parameters - input image (1 channel), output image (1 channel), ksize (odd aperture size)
				medianBlur(initial_mask, noise_free_mask, 5); //using input without histogram equalization
				//medianBlur(histogram_mask, noise_free_mask, 5); //using input with histogram equalization

			//test for channels in image matrix
			//std::cout << mask.channels() << endl;
			//std::cout << histogram.channels() << endl;

		//preview temporary results
		//imshow("salt", noise_free_mask);

		//????????????????? here frame from previous 10th last frame needs to be copied using max to current frame??????????????????
		//frame.copyTo(processed_image, noise_free_mask);
		
		
		//imshow("captured", noise_free_mask);

		//event handling on preview
		switch (waitKey(10))
		{
			//capture image at space bar
			case 32:
				//imwrite("test\\captured_image\\/processed_image.jpg", noise_free_mask); //save image as jepg
				imshow("captured", processed_image); // show image in seprate windo

				break;

			//exit at esc
			case 27:
				return 0;
		}

	}
}
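A ring-buffer variant of the sliding-window logic above, sketched under the assumption of the same OpenCV 2.x API (this is not the original code): it avoids the hard exit once the fixed history[100] array fills up.

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	const int HISTORY = 10;                 //sliding-window depth
	Mat history[HISTORY];
	Mat frame, mask, clean;
	Ptr<BackgroundSubtractor> mog2 = new BackgroundSubtractorMOG2(300, 0, 0);
	VideoCapture cap(0);
	for (int n = 0; ; ++n)
	{
		cap >> frame;
		if (frame.empty())
			break;
		frame.copyTo(history[n % HISTORY]); //overwrite the oldest slot
		if (n >= HISTORY)
		{
			mog2->operator()(frame, mask, 0.2);
			medianBlur(mask, clean, 5);
			//overwrite moving pixels with the frame from ~HISTORY frames ago
			history[(n + 1) % HISTORY].copyTo(frame, clean);
			imshow("delayed overlay", frame);
		}
		if (waitKey(10) == 27)              //exit at esc
			return 0;
	}
	return 0;
}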
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardize the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or NULL (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x= -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect,SCALEDWIDTH);

    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) {

            // Make the face image the same size as the training images.

            // Since we found both eyes, lets rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);
            double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

            // Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
            const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
            // Get the amount we need to scale the image to be the desired fixed size we want.
            double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
            double scale = desiredLen / len;
            // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
            Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
            // Shift the center of the eyes to be the desired center between the eyes.
            rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
            rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

            // Rotate and scale and translate the image to the desired angle & size & position!
            // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
            Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
            warpAffine(gray, warped, rot_mat, warped.size());
            //imshow("warped", warped);

            // Give the image a standard brightness and contrast, in case it was too dark or had low contrast.
            if (!doLeftAndRightSeparately) {
                // Do it on the whole face.
                equalizeHist(warped, warped);
            }
            else {
                // Do it separately for the left and right sides of the face.
                equalizeLeftAndRightHalves(warped);
            }
            //imshow("equalized", warped);

            // Use the "Bilateral Filter" to reduce pixel noise by smoothing the image, but keeping the sharp edges in the face.
            Mat filtered = Mat(warped.size(), CV_8U);
            bilateralFilter(warped, filtered, 0, 20.0, 2.0);
            //imshow("filtered", filtered);

            // Filter out the corners of the face, since we mainly just care about the middle parts.
            // Draw a filled ellipse in the middle of the face-sized image.
            Mat mask = Mat(warped.size(), CV_8U, Scalar(0)); // Start with an empty mask.
            Point faceCenter = Point( desiredFaceWidth/2, cvRound(desiredFaceHeight * FACE_ELLIPSE_CY) );
            Size size = Size( cvRound(desiredFaceWidth * FACE_ELLIPSE_W), cvRound(desiredFaceHeight * FACE_ELLIPSE_H) );
            ellipse(mask, faceCenter, size, 0, 0, 360, Scalar(255), CV_FILLED);
            //imshow("mask", mask);

            // Use the mask, to remove outside pixels.
            Mat dstImg = Mat(warped.size(), CV_8U, Scalar(128)); // Clear the output image to a default gray.
            /*
            namedWindow("filtered");
            imshow("filtered", filtered);
            namedWindow("dstImg");
            imshow("dstImg", dstImg);
            namedWindow("mask");
            imshow("mask", mask);
            */
            // Apply the elliptical mask on the face.
            filtered.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.
            //imshow("dstImg", dstImg);

            return dstImg;
        }
        /*
        else {
            // Since no eyes were found, just do a generic image resize.
            resize(gray, tmpImg, Size(w,h));
        }
        */
    }
    return Mat();
}
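A hypothetical call site; the cascade file names and the desired face width of 70 are example values, and DESIRED_LEFT_EYE_X/Y, FACE_ELLIPSE_* and SCALEDWIDTH are assumed defined as used in the function above:

CascadeClassifier faceCascade, eyeCascade1, eyeCascade2;
faceCascade.load("lbpcascade_frontalface.xml");
eyeCascade1.load("haarcascade_eye.xml");
eyeCascade2.load("haarcascade_eye_tree_eyeglasses.xml");

Mat frame = imread("person.jpg");
Rect faceRect;
Point leftEye, rightEye;
Mat face = getPreprocessedFace(frame, 70, faceCascade, eyeCascade1, eyeCascade2,
                               true, &faceRect, &leftEye, &rightEye, NULL, NULL);
if (!face.empty())
    imshow("preprocessed face", face);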
Esempio n. 22
void findFeaturePoint(Mat src, vector<Point>& point, vector<Rect>& rect_res,
		int FLAG) {
	LOGD_ERR("findFeaturePoint Enter");
	char name[10];
	sprintf(name, "Step %d", FLAG);
	Mat src_mask = Mat(src.size(), CV_8U);
	Mat thresholdRes = Mat(src.size(), CV_8U);
	Mat thresholdResCopy = Mat(src.size(), CV_8U);
	eclipseMask(src, src_mask);
	if (FLAG)
		equalizeLeftAndRightHalves(src_mask);

//imshow("After Equalize", src_mask);
	double minVal = 0;
	double maxVal = 0;

	minMaxLoc(src_mask, &minVal, &maxVal, NULL, NULL);
//threshold(src, threRes, maxVal-10, 255, THRESH_BINARY);
	threshold(src_mask, thresholdRes, minVal + 7.5, 255, THRESH_BINARY);
	thresholdRes.copyTo(thresholdResCopy);
//LOGD_ERR("Threshold Over");
//imshow(name, thresholdRes);
	vector<Vec4i> hierarchy;
	vector < vector<Point2i> > contours;
	findContours(thresholdResCopy.rowRange(0, thresholdRes.rows / 2), contours,
			hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
//LOGD_ERR("findContours Over");
	sort(contours.begin(), contours.end(), my_cmp);
//LOGD_ERR("sort Over");
//cout << contours.size() << endl;
//LOGD_ERR(contours.size());
	Rect rect;
	if (contours.size() < 2)
		return;
	switch (FLAG) {
	case 0:
		for (int i = 0; i < 2; i++) {

			rect = boundingRect(contours[i]);

			point.push_back(
					Point((rect.tl().x + rect.br().x) * 0.5,
							(rect.tl().y + rect.br().y) * 0.5));
			rect_res.push_back(rect);
			//rectangle(src, rect, M_RED);

		}
		break;
	case 1:
		Mat subThreshold;
		float curV;
		for (int k = 0; k < 2; k++) {
			//reset the extrema per contour so values from the previous
			//contour do not carry over
			int MAX_RIGHT_I = 0, MAX_RIGHT_J = 0;
			int MAX_LEFT_I = 1000, MAX_LEFT_J = 1000;

			rect = boundingRect(contours[k]);
			subThreshold =
					thresholdRes.colRange(rect.tl().x, rect.br().x).rowRange(
							rect.tl().y, rect.br().y);
			//imshow("SSS",subThreshold);
			for (int i = 0; i < subThreshold.size().height; i++)
				for (int j = 0; j < subThreshold.size().width; j++) {

					//subThreshold is a non-continuous view into thresholdRes,
					//so raw indexing through .data would ignore the row stride;
					//at<uchar>() accounts for the step correctly
					curV = subThreshold.at<uchar>(i, j);

					if (curV <= 10) {
						if (j >= MAX_RIGHT_J) {

							MAX_RIGHT_I = i;
							MAX_RIGHT_J = j;
						}

						if (j <= MAX_LEFT_J) {

							MAX_LEFT_I = i;
							MAX_LEFT_J = j;

						}
					}

				}
			point.push_back(
					Point(MAX_LEFT_J + rect.tl().x, MAX_LEFT_I + rect.tl().y));
			point.push_back(
					Point(MAX_RIGHT_J + rect.tl().x,
							MAX_RIGHT_I + rect.tl().y));
			rect_res.push_back(rect);
			//rectangle(src, rect, M_RED);
		}
		break;
	}
	LOGD_ERR("findFeaturePoint Exit");
}
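A hypothetical call: locate dark feature blobs in a preprocessed grayscale face image (FLAG 0 returns blob centers, FLAG 1 returns left/right extreme points per blob):

Mat face = imread("face.png", CV_LOAD_IMAGE_GRAYSCALE);
vector<Point> featurePoints;
vector<Rect> featureRects;
findFeaturePoint(face, featurePoints, featureRects, 0);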
Esempio n. 23
void useFile() {
    string filename("");
    cout << "Filename : ";
    getline(cin, filename);

    Mat img;

    //Check if errors
    if(!loadImage(img, filename)) {
        return;
    }

    cout << "Load successful !\n\n";

    //Display initial picture
    namedWindow("initial picture", WINDOW_AUTOSIZE);
    imshow("initial picture", img);
    waitKey(0); //Wait before next step
    destroyWindow("initial picture");

    //Display grayscale picture
    namedWindow("grayscale picture", WINDOW_AUTOSIZE);
    convertImageToGrayScale(img);
    imshow("grayscale picture", img);
    waitKey(0); //Wait before next step
    destroyWindow("grayscale picture");
    saveImage("1_grayscale", img);

    //Display reduce picture
    string window("before reduce picture"), window2("after reduce picture");
    namedWindow(window, WINDOW_AUTOSIZE);
    int angle(100),
        electrodes_width(10), electrodes_height(6),
        width(img.size().width), height(img.size().height);
    Mat baseImg;
    img.copyTo(baseImg);
    createTrackbar("angle (%)", window, &angle, 100);
    createTrackbar("width", window, &electrodes_width, width);
    createTrackbar("height", window, &electrodes_height, height);
    while(static_cast<char>(waitKey(1)) != 13) {
        baseImg.copyTo(img);

        reduceImage(img, angle, (double)electrodes_height / (double)electrodes_width);

        imshow(window, baseImg);
        imshow(window2, img);
    }
    destroyWindow(window);
    destroyWindow(window2);
    saveImage("2_reduce", img);

    //Display pixelise picture
    window = "before pixelise picture";
    window2 = "after pixelise picture";
    namedWindow(window, WINDOW_AUTOSIZE);
    Mat zoomImg;
    img.copyTo(baseImg);
    int zoom(1);
    createTrackbar("zoom", window, &zoom, 20);
    while(static_cast<char>(waitKey(1)) != 13) {
        baseImg.copyTo(img);

        pixeliseImage(img, electrodes_width, electrodes_height);

        img.copyTo(zoomImg);
        extendImage(zoomImg, zoom);

        imshow(window, baseImg);
        imshow(window2, zoomImg);
    }
    destroyWindow(window);
    destroyWindow(window2);
    saveImage("3_pixelise", img);

    //Display reverse picture
    namedWindow("reverse picture", WINDOW_AUTOSIZE);
    reverseImage(img);
    saveImage("4_reverse", img);
    extendImage(img, zoom);
    imshow("reverse picture", img);
    waitKey(0); //Wait before next step
    destroyWindow("reverse picture");
}
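One plausible definition of the extendImage() helper used above (an assumption; the original is not shown): an integer nearest-neighbour zoom so the individual "electrode" pixels stay visible.

void extendImage(Mat &img, int zoom)
{
    if (zoom > 1)
        resize(img, img, Size(), zoom, zoom, INTER_NEAREST);
}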
Esempio n. 24
    void CV_BilateralFilterTest::reference_bilateral_filter(const Mat &src, Mat &dst, int d,
        double sigma_color, double sigma_space, int borderType)
    {
        int cn = src.channels();
        int i, j, k, maxk, radius;
        double minValSrc = -1, maxValSrc = 1;
        const int kExpNumBinsPerChannel = 1 << 12;
        int kExpNumBins = 0;
        float lastExpVal = 1.f;
        float len, scale_index;
        Size size = src.size();

        dst.create(size, src.type());

        CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) &&
            src.type() == dst.type() && src.size() == dst.size() &&
            src.data != dst.data );

        if( sigma_color <= 0 )
            sigma_color = 1;
        if( sigma_space <= 0 )
            sigma_space = 1;

        double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
        double gauss_space_coeff = -0.5/(sigma_space*sigma_space);

        if( d <= 0 )
            radius = cvRound(sigma_space*1.5);
        else
            radius = d/2;
        radius = MAX(radius, 1);
        d = radius*2 + 1;
        // compute the min/max range for the input image (even if multichannel)

        minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
        if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
        {
            src.copyTo(dst);
            return;
        }

        // temporary copy of the image with borders for easy processing
        Mat temp;
        copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
        patchNaNs(temp);

        // allocate lookup tables
        vector<float> _space_weight(d*d);
        vector<int> _space_ofs(d*d);
        float* space_weight = &_space_weight[0];
        int* space_ofs = &_space_ofs[0];

        // assign a length which is slightly more than needed
        len = (float)(maxValSrc - minValSrc) * cn;
        kExpNumBins = kExpNumBinsPerChannel * cn;
        vector<float> _expLUT(kExpNumBins+2);
        float* expLUT = &_expLUT[0];

        scale_index = kExpNumBins/len;

        // initialize the exp LUT
        for( i = 0; i < kExpNumBins+2; i++ )
        {
            if( lastExpVal > 0.f )
            {
                double val =  i / scale_index;
                expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
                lastExpVal = expLUT[i];
            }
            else
                expLUT[i] = 0.f;
        }

        // initialize space-related bilateral filter coefficients
        for( i = -radius, maxk = 0; i <= radius; i++ )
            for( j = -radius; j <= radius; j++ )
            {
                double r = std::sqrt((double)i*i + (double)j*j);
                if( r > radius )
                    continue;
                space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
                space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
            }

        for( i = 0; i < size.height; i++ )
        {
            const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
            float* dptr = (float*)(dst.data + i*dst.step);

            if( cn == 1 )
            {
                for( j = 0; j < size.width; j++ )
                {
                    float sum = 0, wsum = 0;
                    float val0 = sptr[j];
                    for( k = 0; k < maxk; k++ )
                    {
                        float val = sptr[j + space_ofs[k]];
                        float alpha = (float)(std::abs(val - val0)*scale_index);
                        int idx = cvFloor(alpha);
                        alpha -= idx;
                        float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
                        sum += val*w;
                        wsum += w;
                    }
                    dptr[j] = (float)(sum/wsum);
                }
            }
            else
            {
                assert( cn == 3 );
                for( j = 0; j < size.width*3; j += 3 )
                {
                    float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
                    float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
                    for( k = 0; k < maxk; k++ )
                    {
                        const float* sptr_k = sptr + j + space_ofs[k];
                        float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
                        float alpha = (float)((std::abs(b - b0) +
                            std::abs(g - g0) + std::abs(r - r0))*scale_index);
                        int idx = cvFloor(alpha);
                        alpha -= idx;
                        float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
                        sum_b += b*w; sum_g += g*w; sum_r += r*w;
                        wsum += w;
                    }
                    wsum = 1.f/wsum;
                    b0 = sum_b*wsum;
                    g0 = sum_g*wsum;
                    r0 = sum_r*wsum;
                    dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0;
                }
            }
        }
    }
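For reference, the neighbour weight this test implements (via the space_weight and expLUT tables) is

$$ w(p,q) = \exp\!\left(-\frac{\|p-q\|^2}{2\sigma_s^2}\right)\exp\!\left(-\frac{d(p,q)^2}{2\sigma_c^2}\right), \qquad I_{\mathrm{out}}(p) = \frac{\sum_q w(p,q)\, I(q)}{\sum_q w(p,q)}, $$

where d(p,q) is |I(p)-I(q)| for single-channel input and the sum of absolute per-channel differences for three-channel input; neighbours with r > radius are skipped.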
Esempio n. 25
 static bool to(PyObject* obj, vector<_Tp>& value, const char* name="<unknown>")
 {
     typedef typename DataType<_Tp>::channel_type _Cp;
     if(!obj || obj == Py_None)
         return true;
     if (PyArray_Check(obj))
     {
         Mat m;
         pyopencv_to(obj, m, name);
         m.copyTo(value);
         return true;   //otherwise the array would also be re-parsed as a sequence below
     }
     if (!PySequence_Check(obj))
         return false;
     PyObject *seq = PySequence_Fast(obj, name);
     if (seq == NULL)
         return false;
     int i, j, n = (int)PySequence_Fast_GET_SIZE(seq);
     value.resize(n);
     
     int type = DataType<_Tp>::type;
     int depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
     PyObject** items = PySequence_Fast_ITEMS(seq);
     
     for( i = 0; i < n; i++ )
     {
         PyObject* item = items[i];
         PyObject* seq_i = 0;
         PyObject** items_i = &item;
         _Cp* data = (_Cp*)&value[i];
         
         if( channels == 2 && PyComplex_CheckExact(item) )
         {
             Py_complex c = PyComplex_AsCComplex(item);   //convert the current item, not the whole sequence
             data[0] = saturate_cast<_Cp>(c.real);
             data[1] = saturate_cast<_Cp>(c.imag);
             continue;
         }
         if( channels > 1 )
         {
             if( PyArray_Check(item))
             {
                 Mat src;
                 pyopencv_to(item, src, name);
                 if( src.dims != 2 || src.channels() != 1 ||
                    ((src.cols != 1 || src.rows != channels) &&
                     (src.cols != channels || src.rows != 1)))
                     break;
                 Mat dst(src.rows, src.cols, depth, data);
                 src.convertTo(dst, type);
                 if( dst.data != (uchar*)data )
                     break;
                 continue;
             }
             
             seq_i = PySequence_Fast(item, name);
             if( !seq_i || (int)PySequence_Fast_GET_SIZE(seq_i) != channels )
             {
                 Py_XDECREF(seq_i);
                 break;
             }
             items_i = PySequence_Fast_ITEMS(seq_i);
         }
         
         for( j = 0; j < channels; j++ )
         {
             PyObject* item_ij = items_i[j];
             if( PyInt_Check(item_ij))
             {
                 int v = PyInt_AsLong(item_ij);
                 if( v == -1 && PyErr_Occurred() )
                     break;
                 data[j] = saturate_cast<_Cp>(v);
             }
             else if( PyFloat_Check(item_ij))
             {
                 double v = PyFloat_AsDouble(item_ij);
                 if( PyErr_Occurred() )
                     break;
                 data[j] = saturate_cast<_Cp>(v);
             }
             else
                 break;
         }
         Py_XDECREF(seq_i);
         if( j < channels )
             break;
     }
     Py_DECREF(seq);
     return i == n;
 }
Esempio n. 26
int main(int argc, char** argv)
{
	VideoCapture capture;
	char* video = argv[1];
	int flag = arg_parse(argc, argv);
	capture.open(video);

	if(!capture.isOpened()) {
		fprintf(stderr, "Could not initialize capturing..\n");
		return -1;
	}

	int frame_num = 0;
	TrackInfo trackInfo;
	DescInfo hogInfo, hofInfo, mbhInfo;

	InitTrackInfo(&trackInfo, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, false, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, true, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, false, patch_size, nxy_cell, nt_cell);

	SeqInfo seqInfo;
	InitSeqInfo(&seqInfo, video);

	if(flag)
		seqInfo.length = end_frame - start_frame + 1;

//	fprintf(stderr, "video size, length: %d, width: %d, height: %d\n", seqInfo.length, seqInfo.width, seqInfo.height);

	if(show_track == 1)
		namedWindow("DenseTrack", 0);

	Mat image, prev_grey, grey;

	std::vector<float> fscales(0);
	std::vector<Size> sizes(0);

	std::vector<Mat> prev_grey_pyr(0), grey_pyr(0), flow_pyr(0);
	std::vector<Mat> prev_poly_pyr(0), poly_pyr(0); // for optical flow

	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while(true) {
		Mat frame;
		int i, j, c;

		// get a new frame
		capture >> frame;
		if(frame.empty())
			break;

		if(frame_num < start_frame || frame_num > end_frame) {
			frame_num++;
			continue;
		}

		if(frame_num == start_frame) {
			image.create(frame.size(), CV_8UC3);
			grey.create(frame.size(), CV_8UC1);
			prev_grey.create(frame.size(), CV_8UC1);

			InitPry(frame, fscales, sizes);

			BuildPry(sizes, CV_8UC1, prev_grey_pyr);
			BuildPry(sizes, CV_8UC1, grey_pyr);

			BuildPry(sizes, CV_32FC2, flow_pyr);
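			// 5-channel float pyramids: per-pixel polynomial expansion
			// coefficients (assumed layout of my::FarnebackPolyExpPyr)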
			BuildPry(sizes, CV_32FC(5), prev_poly_pyr);
			BuildPry(sizes, CV_32FC(5), poly_pyr);

			xyScaleTracks.resize(scale_num);

			frame.copyTo(image);
			cvtColor(image, prev_grey, CV_BGR2GRAY);

			for(int iScale = 0; iScale < scale_num; iScale++) {
				if(iScale == 0)
					prev_grey.copyTo(prev_grey_pyr[0]);
				else
					resize(prev_grey_pyr[iScale-1], prev_grey_pyr[iScale], prev_grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

				// dense sampling feature points
				std::vector<Point2f> points(0);
				DenseSample(prev_grey_pyr[iScale], points, quality, min_distance);

				// save the feature points
				std::list<Track>& tracks = xyScaleTracks[iScale];
				for(i = 0; i < points.size(); i++)
					tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
			}

			// compute polynomial expansion
			my::FarnebackPolyExpPyr(prev_grey, prev_poly_pyr, fscales, 7, 1.5);

			frame_num++;
			continue;
		}

		init_counter++;
		frame.copyTo(image);
		cvtColor(image, grey, CV_BGR2GRAY);

		// compute optical flow for all scales once
		my::FarnebackPolyExpPyr(grey, poly_pyr, fscales, 7, 1.5);
		my::calcOpticalFlowFarneback(prev_poly_pyr, poly_pyr, flow_pyr, 10, 2);
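		// prev_poly_pyr holds the previous frame's expansion (swapped in at
		// the end of the loop), so each frame is polynomial-expanded only once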

		for(int iScale = 0; iScale < scale_num; iScale++) {
			if(iScale == 0)
				grey.copyTo(grey_pyr[0]);
			else
				resize(grey_pyr[iScale-1], grey_pyr[iScale], grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

			int width = grey_pyr[iScale].cols;
			int height = grey_pyr[iScale].rows;

			// compute the integral histograms
			DescMat* hogMat = InitDescMat(height+1, width+1, hogInfo.nBins);
			HogComp(prev_grey_pyr[iScale], hogMat->desc, hogInfo);

			DescMat* hofMat = InitDescMat(height+1, width+1, hofInfo.nBins);
			HofComp(flow_pyr[iScale], hofMat->desc, hofInfo);

			DescMat* mbhMatX = InitDescMat(height+1, width+1, mbhInfo.nBins);
			DescMat* mbhMatY = InitDescMat(height+1, width+1, mbhInfo.nBins);
			MbhComp(flow_pyr[iScale], mbhMatX->desc, mbhMatY->desc, mbhInfo);

			// track feature points in each scale separately
			std::list<Track>& tracks = xyScaleTracks[iScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end();) {
				int index = iTrack->index;
				Point2f prev_point = iTrack->point[index];
				int x = std::min<int>(std::max<int>(cvRound(prev_point.x), 0), width-1);
				int y = std::min<int>(std::max<int>(cvRound(prev_point.y), 0), height-1);

				// flow_pyr[iScale] is a 2-channel float image storing the (dx, dy)
				// displacement per pixel, hence the 2*x / 2*x+1 indexing
				Point2f point;
				point.x = prev_point.x + flow_pyr[iScale].ptr<float>(y)[2*x];
				point.y = prev_point.y + flow_pyr[iScale].ptr<float>(y)[2*x+1];
 
				if(point.x <= 0 || point.x >= width || point.y <= 0 || point.y >= height) {
					iTrack = tracks.erase(iTrack);
					continue;
				}

				// added: rescale the tracked points to the original image scale
				std::vector<Point2f> trajectory(trackInfo.length+1);
				for(int i = 0; i <= trackInfo.length; ++i)
					trajectory[i] = iTrack->point[i]*fscales[iScale];

				// get the descriptors for the feature point
				RectInfo rect;
				GetRect(prev_point, rect, width, height, hogInfo);
				GetDesc(hogMat, rect, hogInfo, iTrack->hog, index);
				GetDesc(hofMat, rect, hofInfo, iTrack->hof, index);
				GetDesc(mbhMatX, rect, mbhInfo, iTrack->mbhX, index);
				GetDesc(mbhMatY, rect, mbhInfo, iTrack->mbhY, index);
				iTrack->addPoint(point);

				// draw the trajectories at the first scale
				if(show_track == 1 && iScale == 0)
					DrawTrack(iTrack->point, iTrack->index, fscales[iScale], image);

				// if the trajectory achieves the maximal length
				if(iTrack->index >= trackInfo.length) {
					std::vector<Point2f> trajectory(trackInfo.length+1);
					for(int i = 0; i <= trackInfo.length; ++i)
						trajectory[i] = iTrack->point[i]*fscales[iScale];
				
					float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
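					// IsValid computes these displacement statistics and is
					// assumed to reject static or erratic trajectories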
					if(IsValid(trajectory, mean_x, mean_y, var_x, var_y, length)) {
          //	printf("%d\t%f\t%f\t%f\t%f\t%f\t%f\t", frame_num, mean_x, mean_y, var_x, var_y, length, fscales[iScale]);

						// for spatio-temporal pyramid
						/* don't need these features
						printf("%f\t", std::min<float>(std::max<float>(mean_x/float(seqInfo.width), 0), 0.999));
						printf("%f\t", std::min<float>(std::max<float>(mean_y/float(seqInfo.height), 0), 0.999));
						printf("%f\t", std::min<float>(std::max<float>((frame_num - trackInfo.length/2.0 - start_frame)/float(seqInfo.length), 0), 0.999));
				  

						// output the trajectory
						for (int i = 0; i < trackInfo.length; ++i)
  							printf("%f\t%f\t", trajectory[i].x,trajectory[i].y);

						*/
						/* don't need these features
						PrintDesc(iTrack->hog, hogInfo, trackInfo);
						PrintDesc(iTrack->hof, hofInfo, trackInfo);
						PrintDesc(iTrack->mbhX, mbhInfo, trackInfo);
						PrintDesc(iTrack->mbhY, mbhInfo, trackInfo);
            */

						printf("\n");
					}

					iTrack = tracks.erase(iTrack);
					continue;
				}
				++iTrack;
			}
			ReleDescMat(hogMat);
			ReleDescMat(hofMat);
			ReleDescMat(mbhMatX);
			ReleDescMat(mbhMatY);

			if(init_counter != trackInfo.gap)
				continue;

			// detect new feature points every initGap frames
			std::vector<Point2f> points(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++)
				points.push_back(iTrack->point[iTrack->index]);

			DenseSample(grey_pyr[iScale], points, quality, min_distance);
			// save the new feature points
			for(i = 0; i < points.size(); i++)
				tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
		}

		init_counter = 0;
		grey.copyTo(prev_grey);
		for(i = 0; i < scale_num; i++) {
			grey_pyr[i].copyTo(prev_grey_pyr[i]);
			poly_pyr[i].copyTo(prev_poly_pyr[i]);
		}

		frame_num++;

		if( show_track == 1 ) {
			imshow( "DenseTrack", image);
			c = cvWaitKey(3);
			if((char)c == 27) break;
		}
	}

	if( show_track == 1 )
		destroyWindow("DenseTrack");

	return 0;
}
int main( int argc, char** argv)
{

	string face_cascade_name = "../xintai.xml";
	CascadeClassifier face_cascade;

	if(!face_cascade.load( face_cascade_name ) )
	{
		std::cout<<"can not load the face model "<<std::endl;
		return -1;
	}

	string image_path = "/home/yuanyang/workspace/face_align_one_millisecond/face_landmark_data/lfpw/trainset/";
	fs::path input_path( image_path );

	if(!fs::exists(input_path))
	{
		std::cout<<"input_path "<<input_path<<" does not exist "<<std::endl;
		return -1;
	}

	if(!fs::is_directory(input_path))
	{
		std::cout<<"input_path is not a directory "<<input_path<<std::endl;
		return -1;
	}

	
	FacedetWithFilter fff;
	fff.init("../svm_w.xml","../svm_b.xml");
	
    fff.setConfidenceParas( 2.0, 0.1, -10);		// parameter meanings: see the class definition


	/* load face detector from dlib */
	dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
	
	
	fs::directory_iterator end_it;
	for( fs::directory_iterator file_iter(input_path); file_iter != end_it; file_iter++ )
    {
		fs::path s = *(file_iter);
		
        string basename = fs::basename( s );
        string pathname = file_iter->path().string();
		string extname  = fs::extension( *(file_iter) );
		if( extname!=".jpg" &&
			extname!=".png")
		{
            //std::cout<<"do no support ext name "<<extname<<std::endl;
			continue;
		}

		Mat im = imread( pathname );
		if( im.empty() )
			continue;

		dlib::cv_image<dlib::bgr_pixel> input_im(im);
		
		dlib::array2d<dlib::bgr_pixel> img;
		dlib::assign_image( img, input_im );
		
		int min_ = 50;
		int max_ = min( im.cols, im.rows);
		double scale_factor = 1.05;

        fff.setDetectionParas( min_ , max_ , scale_factor, 0.1);	// detection parameters
		
		vector<Rect> faces;
		faces.reserve(10);

		Mat cp1,cp2,cp3;
		im.copyTo(cp1);
		im.copyTo(cp2);
		im.copyTo(cp3);


		fff.setConfidenceParas(2.5, 0.2, -10);

		double t = cv::getTickCount();
        fff.detectAndFilter( im , faces);
	//	fff.detectFace(im , faces );
		t = (double)cv::getTickCount() - t;
		std::cout<<"time duration pico det and filter is "<<t/cv::getTickFrequency()<<std::endl;

		t = cv::getTickCount();
		fff.detectFace(im , faces );
		t = (double)cv::getTickCount() - t;
		std::cout<<"time duration pico det is "<<t/cv::getTickFrequency()<<std::endl;


		for(int c=0;c<faces.size();c++)
		{
			rectangle( cp1, faces[c], Scalar(255,0,0));
		}
		imshow("pico", cp1 );
		
		/* haar boost */
	
		vector<Rect> faces2;
	
		t = (double)getTickCount();
		face_cascade.detectMultiScale( im, faces2, scale_factor, 2, 0, Size(min_,min_), Size(max_,max_) );
		t = (double)getTickCount() - t;
		std::cout<<"time duration haarboost is "<<t/getTickFrequency()<<std::endl;

		for(int c=0;c<faces2.size();c++)
		{
			rectangle( cp2, faces2[c], Scalar(0,255,0));
		}
		imshow("haar", cp2 );



		/* dlib face detect */
		t = (double)cv::getTickCount();
        std::vector<dlib::rectangle> dets = detector(img);
		t = (double)cv::getTickCount() - t;
		std::cout<<"time duration in dlib is "<<t/(double)cv::getTickFrequency()<<std::endl;

		for (unsigned long j = 0; j < dets.size(); ++j)
        {
			int top_ = dets[j].top();
			int left_ = dets[j].left();
			int width_ = dets[j].width();
			int height_ = dets[j].height();

			rectangle( cp3, Rect( left_, top_, width_, height_ ), Scalar(0,0,255));
		}

		imshow("shog",cp3);


		waitKey(0);	
	}
      
	return 0;
}
int main(int argc, char** argv){
  // gauss is assumed to be a 3x3 float kernel defined elsewhere whose
  // entries sum to 16, hence the 1/16 normalization below
  Mat mask, mask1;
  VideoCapture video_in;
  mask = Mat(3, 3, CV_32F, gauss);
  scaleAdd(mask, 1/16.0, Mat::zeros(3,3,CV_32F), mask1);
  mask = mask1;
  int cont = 0;
  video_in.open("tiltshiftvideo_entrada.mp4");
  if(!video_in.isOpened()){
    cout << "Could not open tiltshiftvideo_entrada.mp4" << endl;
    return -1;
  }

  width = video_in.get(CV_CAP_PROP_FRAME_WIDTH);
  height = video_in.get(CV_CAP_PROP_FRAME_HEIGHT);
  vertical_slider = height/2;
  vertical_slider_max = height;
  center_slider = height/2;
  center_slider_max = height;
  VideoWriter video_out("tiltshiftvideo_saida.avi",CV_FOURCC('M','J','P','G'),10,Size(width,height),true);

  namedWindow("tiltshiftvideo", 1);

  sprintf( TrackbarName, "Alpha x %d", alfa_slider_max );
  createTrackbar( TrackbarName, "tiltshiftvideo",&alfa_slider,alfa_slider_max,on_trackbar_blend );
  //on_trackbar_blend(alfa_slider, 0 );

  sprintf( TrackbarName, "DistCenter x %d", center_slider_max );
  createTrackbar( TrackbarName, "tiltshiftvideo",&center_slider, center_slider_max, on_trackbar_center );
  //on_trackbar_center(center_slider, 0 );

  sprintf( TrackbarName, "VertPosi x %d", vertical_slider_max );
  createTrackbar( TrackbarName, "tiltshiftvideo", &vertical_slider, vertical_slider_max, on_trackbar_vert );
  //on_trackbar_vert(vertical_slider, 0 );

  for(;;) {
    // video_in >> original;
    bool bSuccess = video_in.read(original); // read a new frame from video
    if (!bSuccess){
      cout << "Cannot read the frame from video file" << endl;
      break;
    }
    if(cont == 6){
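      // only every sixth frame is processed: six passes of the normalized
      // 3x3 kernel approximate one wider Gaussian blur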
      filter2D(original, desfocada, original.depth(), mask, Point(1,1), 0);
      filter2D(desfocada, desfocada, desfocada.depth(), mask, Point(1,1), 0);
      filter2D(desfocada, desfocada, desfocada.depth(), mask, Point(1,1), 0);
      filter2D(desfocada, desfocada, desfocada.depth(), mask, Point(1,1), 0);
      filter2D(desfocada, desfocada, desfocada.depth(), mask, Point(1,1), 0);
      filter2D(desfocada, desfocada, desfocada.depth(), mask, Point(1,1), 0);
      desfocada.copyTo(desf_cpy);
      original = increase_colour_saturation(original);
      on_trackbar_blend(alfa_slider, 0 );
      on_trackbar_center(center_slider, 0 );
      on_trackbar_vert(vertical_slider, 0 );
      if(waitKey(30)>=0) break;
      cont = 0;
      video_out.write(blended);
    }
    cont++;
  }

  return 0;
}
Esempio n. 29
0
//--------------------------------------[ main() function ]--------------------------------------
//          Description: entry point of the console application; execution starts here
//------------------------------------------------------------------------------------------------
int main( )
{

	//[1] Read the source image in grayscale and display it
	Mat srcImage = imread("1.jpg", 0);
	if(!srcImage.data ) { printf("Error reading the image: make sure the file passed to imread exists! \n"); return -1; } 
	imshow("Original Image" , srcImage);

	ShowHelpText();

	//[2] Expand the input image to the optimal DFT size, padding the border with zeros
	int m = getOptimalDFTSize( srcImage.rows );
	int n = getOptimalDFTSize( srcImage.cols );
	// initialize the appended pixels to 0
	Mat padded;
	copyMakeBorder(srcImage, padded, 0, m - srcImage.rows, 0, n - srcImage.cols, BORDER_CONSTANT, Scalar::all(0));

	//[3] Allocate storage for the DFT result (real and imaginary parts):
	// merge the planes array into a single multi-channel array complexI
	Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
	Mat complexI;
	merge(planes, 2, complexI);         

	//[4] Perform the discrete Fourier transform in place
	dft(complexI, complexI);           

	//[5] Convert the complex values to magnitude, i.e. => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
	split(complexI, planes); // split the multi-channel array complexI into single-channel planes: planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
	magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude  
	Mat magnitudeImage = planes[0];

	//[6] Switch to a logarithmic scale
	magnitudeImage += Scalar::all(1);
	log(magnitudeImage, magnitudeImage); // natural logarithm

	//[7] Crop and rearrange the quadrants of the magnitude image
	// crop the spectrum if it has an odd number of rows or columns
	// (ANDing the size with -2 clears the lowest bit, rounding down to an even value)
	magnitudeImage = magnitudeImage(Rect(0, 0, magnitudeImage.cols & -2, magnitudeImage.rows & -2));
	// rearrange the quadrants of the Fourier image so that the origin lies at the image center
	int cx = magnitudeImage.cols/2;
	int cy = magnitudeImage.rows/2;
	Mat q0(magnitudeImage, Rect(0, 0, cx, cy));   // top-left ROI
	Mat q1(magnitudeImage, Rect(cx, 0, cx, cy));  // top-right ROI
	Mat q2(magnitudeImage, Rect(0, cy, cx, cy));  // bottom-left ROI
	Mat q3(magnitudeImage, Rect(cx, cy, cx, cy)); // bottom-right ROI
	// swap quadrants (top-left with bottom-right)
	Mat tmp;
	q0.copyTo(tmp);
	q3.copyTo(q0);
	tmp.copyTo(q3);
	// swap quadrants (top-right with bottom-left)
	q1.copyTo(tmp);                 
	q2.copyTo(q1);
	tmp.copyTo(q2);

	//[8] Normalize to floating-point values between 0 and 1 so the matrix can be displayed
	normalize(magnitudeImage, magnitudeImage, 0, 1, CV_MINMAX);

	//[9] Show the result
	imshow("Magnitude Spectrum", magnitudeImage);
	waitKey();

	return 0;
}
Esempio n. 30
-1
int main( int argc, char** argv ){

	//capture the video from web cam
	VideoCapture cap(0);
 
	// if not success, exit program
	if ( !cap.isOpened() ){  
        	cout << "Cannot open the web cam" << endl;
        	return -1;
	}

	//set height and width of capture frame
	cap.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);

	//create a window called "Control"
	namedWindow("Control", CV_WINDOW_AUTOSIZE);

	//Create trackbars in "Control" window

	cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
	cvCreateTrackbar("HighH", "Control", &iHighH, 179);

	cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
 	cvCreateTrackbar("HighS", "Control", &iHighS, 255);

 	cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
 	cvCreateTrackbar("HighV", "Control", &iHighV, 255);


    	while (true){

        	Mat imgOriginal;

        	bool bSuccess = cap.read(imgOriginal); // read a new frame from video

         	if (!bSuccess){ //if not success, break loop
             		cout << "Cannot read a frame from video stream" << endl;
             		break;
        	}

		//Convert the captured frame from BGR to HSV
  		Mat imgHSV;
  		cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); 
 
		//Threshold the image
		Mat imgThresholded;

  		inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded);

  		//morphological opening (remove small objects from the foreground)
  		erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
  		dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 

  		//morphological closing (fill small holes in the foreground)
  		dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 
  		erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

		//these two vectors needed for output of findContours
		vector< vector<Point> > contours;
		vector<Vec4i> hierarchy;

		Mat imgContour;
		imgThresholded.copyTo(imgContour);

		//find contours of filtered image using openCV findContours function
		findContours(imgContour,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
		
		//use moments method to find our filtered object
		double refArea = 0;
		if (hierarchy.size() > 0) {
			int numObjects = hierarchy.size();
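			// hierarchy[index][0] is the next contour at the same level, so
			// this loop visits each top-level contour exactly once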
			for (int index = 0; index >= 0; index = hierarchy[index][0]) {
				Moments moment = moments((cv::Mat)contours[index]);
				double area = moment.m00;
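				// for a binary blob, m00 is its area and (m10/m00, m01/m00) its centroid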
				if(area>MIN_OBJECT_AREA){ // if the contour area exceeds the minimum object area, draw the contour and print the coordinates
					double x = moment.m10/area;
					double y = moment.m01/area;
					//double r = sqrt(area/3.14); // circle radius
					double keliling = arcLength( contours[index], true );

					drawContours( imgOriginal, contours, index, Scalar(0,0,255), 1, 8, hierarchy);
					
					putText(imgOriginal, intToString(x) + "," + intToString(y), Point(x,y+20), FONT_HERSHEY_COMPLEX, 0.5, Scalar(0, 255, 0), 1, 8);
					putText(imgOriginal, "Luas: " + intToString(area), Point(x,y+40), FONT_HERSHEY_COMPLEX, 0.5, Scalar(0, 255, 0), 1, 8);
					putText(imgOriginal, "Keliling: " + intToString(keliling), Point(x,y+60), FONT_HERSHEY_COMPLEX, 0.5, Scalar(0, 255, 0), 1, 8);
						
				}//end if
			}//end for
		}//end if
		
		//imshow("Contour", imgContour);

		//show the thresholded image
  		imshow("Thresholded Image", imgThresholded);

		//show the original image 
  		imshow("Original", imgOriginal);

        	if (waitKey(5) == 27) { //wait 5 ms for the 'esc' key; if pressed, break the loop
            		cout << "esc key is pressed by user" << endl;
            		break; 
       		}
		
    	} //end while
	
   	return 0;

}