static void show()
{
    if(!exist_image || !exist_scan){
        return;
    }

    IplImage* image_view = cvCreateImage(cvGetSize(&image), image.depth, image.nChannels);
    cvCopy(&image, image_view);

    float min_d, max_d;
    min_d = max_d = scan_image.distance.at(0);
    for (int i = 1; i < IMAGE_WIDTH * IMAGE_HEIGHT; i++) {
        float di = scan_image.distance.at(i);
        max_d = di > max_d ? di : max_d;
        min_d = di < min_d ? di : min_d;
    }
    float wid_d = max_d - min_d;

    /*
     * Plot depth points on an image
     */
    CvPoint pt;
    int height, width;
    for(int i = 0; i < (int)scan_image.distance.size(); i++) {
        height = (int)(i % IMAGE_HEIGHT);
        width = (int)(i / IMAGE_HEIGHT);
        if(scan_image.distance.at(i) != 0.0) {
            pt.x = width;
            pt.y = height;
            int colorid = wid_d ? (scan_image.distance.at(i) - min_d) * 255 / wid_d : 128;
            cv::Vec3b color = colormap.at<cv::Vec3b>(colorid);
            int r = color[0];
            int g = color[1];
            int b = color[2];
            cvCircle(image_view, pt, 2, CV_RGB (r, g, b), CV_FILLED, 8, 0);
        }
    }


    drawRects(image_view,
              car_fused_objects.obj,
              cvScalar(255.0, 255.0, 0.0, 0.0),
              (image_view->height) * .3);

    drawRects(image_view,
              pedestrian_fused_objects.obj,
              cvScalar(0.0, 255.0, 0.0, 0.0),
              (image_view->height) * .3);

    /* Put distance text on the image */
    putDistance(image_view,
                car_fused_objects.obj,
                (image_view->height) * .3,
                car_fused_objects.type.c_str());
    putDistance(image_view,
                pedestrian_fused_objects.obj,
                (image_view->height) * .3,
                pedestrian_fused_objects.type.c_str());

    /*
     * Show image
     */
    cvShowImage(window_name, image_view);
    cvWaitKey(2);
    cvReleaseImage(&image_view);
}
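The color mapping in show() linearly rescales each range reading into an index into a 256-entry colormap, falling back to a mid-range color when the scan is flat. A minimal standalone sketch of that mapping (the helper name is illustrative, and a cv::applyColorMap-style 256-row colormap is assumed, as in the code above):

// Map a distance d in [min_d, max_d] to a colormap row index in [0, 255].
// Returns the middle entry when the scene is flat (max_d == min_d), mirroring
// the `wid_d ? ... : 128` guard in show() above.
static int depthToColorIndex(float d, float min_d, float max_d)
{
    float wid_d = max_d - min_d;
    if (wid_d == 0.0f)
        return 128;
    return (int)((d - min_d) * 255 / wid_d);
}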
Example #2
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  //bridge that converts the ROS image message back to an OpenCV image
  sensor_msgs::CvBridge bridge;
  fprintf(stderr, "\n callback function \n");
  //publish data (obstacle waypoints) back to the boat
  //ros::NodeHandle n;
  //std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message
  //std_msgs::Float32 yWaypoint_msg;         // Y coordinate obstacle message
  //publish the waypoint data             
  //ros::Publisher waypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //ros::Publisher Ywaypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //std::stringstream ss;
  
  /***********************************************************************/
    //live image coming streamed straight from the boat's camera
    IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
    //The boat takes flipped images, so you need to flip them back to normal
    cvFlip(boatFront, boatFront, 0);
    IplImage* backUpImage = cvCloneImage(boatFront);
    boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
    int X = boatFront->height;
    int Y = boatFront->width;
    //cout << "height " << X << endl;
    //cout << "width " << Y << endl;    
    
    /*********************Image Filtering variables****************************/
    //these images are used for segmenting objects from the overall background 
    //create a one channel image to convert from RGB to GRAY
    IplImage* grayImage = cvCreateImage(cvGetSize(boatFront),IPL_DEPTH_8U,1);
    //convert grayImage to binary (final step after converting from GRAY)
    IplImage* bwImage = cvCreateImage(cvGetSize(grayImage),IPL_DEPTH_8U,1);
    //variables used for the flood fill segmentation
    CvPoint seed_point = cvPoint(boatFront->height/1.45,0);       //not sure how this variable works
    CvScalar color = CV_RGB(250,0,0);
    CvMemStorage* grayStorage = NULL;     //memory storage for contour sequence
    CvSeq* contours = 0;
    // get blobs and filter them using their area
    //IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    //IplImage* original, *originalThr;
    //IplImage* segmentated = cvCreateImage(cvGetSize(boatFront), 8, 1);
    //unsigned int blobNumber = 0;
    //IplImage* labelImg = cvCreateImage(cvGetSize(boatFront), IPL_DEPTH_LABEL, 1);
    CvMoments moment;
    
    /***********************************************************************/
    //boat's edge distance from the camera. This is used for visual calibration
    //to know the distance from the boat to the nearest obstacles.
    //With respect to the mounted camera, the distance is 21 inches (0.5334 m) side to side
    //and 15 inches (0.381 m) to the front.
    //float boatFrontDistance = 0.381;    //distance in meters
    //float boatSideDistance = 0.5334;    //distance in meters
    // These variables tell the distance from the center bottom of the image
    // (the camera) to the square surrounding the obstacle
    float xObstacleDistance = 0.0;
    float yObstacleDistance = 0.0;
    float obstacleDistance = 0.0;
    float obstacleHeading = 0.0;
    
    int pixelsNumber = 50;  //number of pixels for an n x n matrix and # of neighbors
    const int arraySize = pixelsNumber;
    const int threeArraySize = pixelsNumber;
    //if n gets changed, then the algorithm might have to be
    //recalibrated. Try to keep it constant
    //these variables are used for the k nearest neighbors
    //int accuracy;
    //responses for each of the classifications
    float responseWaterH, responseWaterS, responseWaterV; 
    float responseGroundH, responseGroundS, responseGroundV;
    float responseSkyH, responseSkyS, responseSkyV;
    float averageHue = 0.0;
    float averageSat = 0.0;
    float averageVal = 0.0;
    CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    CvMat* trainClassesH = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesS = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesV = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    /*CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    //Distance
    CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); */
    //these variables are used to traverse the picture in blocks of n x n pixels
    //at a time.
    //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the
    //right way, of course)
    //x and y are the dimensions of the local patch of pixels
    int x = (boatFront->height)/1.45;//(boatFront->height)/2.5 + 105; 
    int y = 0;  
    int skyX = 0; 
    int skyY = 0;
    int row1 = 0;
    int column1 = 0;
    //these two variables are used in order to divide the grid in the
    //resample segmentation part
    int xDivisor = 200;
    int yDivisor = 200;
    //ground sample
    //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //water sample
    CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
    //n x n sample patch taken from the picture
    CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1);
    CvMat* resampleHue = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    CvMat* resampleSat = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    CvMat* resampleVal = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1);
    int xDiv = 20;
    int yDiv = 20;
    CvMat* resampleHue2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    CvMat* resampleSat2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    CvMat* resampleVal2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1);
    //sky training sample
    CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
    CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
    CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
    //initialize each matrix element to zero for ease of use
    //cvZero(groundTrainingHue);
    //cvZero(groundTrainingSat);
    //cvZero(groundTrainingVal);
    cvZero(waterTrainingHue);
    cvZero(waterTrainingSat);
    cvZero(waterTrainingVal);
    cvZero(sampleHue);
    cvZero(sampleSat);
    cvZero(sampleVal);
    cvZero(resampleHue);
    cvZero(resampleSat);
    cvZero(resampleVal);
    cvZero(skyTrainingHue);
    cvZero(skyTrainingSat);
    cvZero(skyTrainingVal);    
    //Stores the votes for each channel (whether it belongs to water or not):
    //1 means part of water, 0 means not part of water.
    //If the sum of votes is bigger than 1/2 the number of elements, then it belongs to water
    int votesSum = 0;
    int comparator[3];        //used when only three votes are needed
    //int comparatorTwo [3][3];    //used when six votes are needed
    //initial sum of votes is zero
    //Error occurs if both arrays are initialized inside a single for loop. Don't know why
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
 
    /***********************************************************************/
    //Convert from RGB to HSV to control the brightness of the objects.
    //work with reflection
    /*Sky recognition. Might be useful for detecting reflections on the water. If
      the sky is detected, and a reflection has the same characteristics as
      something below the horizon, that "something" might be water. Assume the
      sky won't go below the horizon.
    */
    //convert from RGB to HSV
    cvCvtColor(boatFront, boatFront, CV_BGR2HSV);
    cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV);
    HsvImage I(boatFront);
    HsvImage IBackUp(backUpImage);
    //Sky detection
    /*
    for (int i=0; i<boatFront->height;i++)
    {
        for (int j=0; j<boatFront->width;j++)
        {
        //if something is bright enough, consider it sky and store the
        //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255
            if (((I[i][j].v >= 180) && (I[i][j].s <= 16)))
                // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144))))
            {
                //The HSV values vary between 0 and 1
                cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h);
                cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s);
                cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v);
                //I[i][j].h = 0.3*180;       //H (color)
                //I[i][j].s = 0.3*180;          //S (color intensity)
                //I[i][j].v = 0.6*180;          //V (brightness)
                if (skyY == pixelsNumber-1)
                {
                   if (skyX == pixelsNumber-1)
                     skyX = 1;
                   else
                     skyX = skyX + 1;
                   skyY = 1;
                }
                else
                  skyY = skyY + 1;
           }   
        }
    }
    
    /***********************************************************************/
    //offline input pictures. Samples of water properties are taken from these 
    //pictures to get a range of values for H, S, V that will be stored into a 
    //pre-defined classifier
    IplImage* imageSample1 = cvLoadImage("20110805_032255.jpg");
    cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83));
    cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV);
    HsvImage I1(imageSample1);
    IplImage* imageSample2 = cvLoadImage("20110805_032257.jpg");
    cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV);
    HsvImage I2(imageSample2);
    IplImage* imageSample3 = cvLoadImage("20110805_032259.jpg");
    cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV);
    HsvImage I3(imageSample3);
    IplImage* imageSample4 = cvLoadImage("20110805_032301.jpg");
    cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV);
    HsvImage I4(imageSample4);
    IplImage* imageSample5 = cvLoadImage("20110805_032303.jpg");
    cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV);
    HsvImage I5(imageSample5);
    IplImage* imageSample6 = cvLoadImage("20110805_032953.jpg");
    cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV);
    HsvImage I6(imageSample6);
    IplImage* imageSample7 = cvLoadImage("20110805_032955.jpg");
    cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV);
    HsvImage I7(imageSample7);
    IplImage* imageSample8 = cvLoadImage("20110805_032957.jpg");
    cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV);
    HsvImage I8(imageSample8);
    IplImage* imageSample9 = cvLoadImage("20110805_032959.jpg");
    cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV);
    HsvImage I9(imageSample9);
    IplImage* imageSample10 = cvLoadImage("20110805_033001.jpg");
    cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV);
    HsvImage I10(imageSample10);
    IplImage* imageSample11 = cvLoadImage("20110805_033009.jpg");
    cvCvtColor(imageSample11, imageSample11, CV_BGR2HSV);
    HsvImage I11(imageSample11);
    IplImage* imageSample12 = cvLoadImage("20110805_033011.jpg");
    cvCvtColor(imageSample12, imageSample12, CV_BGR2HSV);
    HsvImage I12(imageSample12);
    
    for (int i=0; i < threeArraySize; i++)
    {
        for (int j=0; j < arraySize; j++)
        {
            row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8);
            column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
            averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + 	
            I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h + I11[row1][column1].h + I12[row1][column1].h) / 12;
            averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + 
            I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s + I11[row1][column1].s + I12[row1][column1].s) / 12;
            averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + 
            I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v + I11[row1][column1].v + I12[row1][column1].v) / 12;
            //water patch sample (n X n matrix)
            cvmSet(waterTrainingHue,i,j,averageHue);
            cvmSet(waterTrainingSat,i,j,averageSat);
            cvmSet(waterTrainingVal,i,j,averageVal);  
             //patch is red (this is for me to know where the ground patch sample is)
            //I[row1][column1].h = 0;
            //I[row1][column1].s = 255;
            //I[row1][column1].v = 255;
        }
    }
    //creating a training sample from an image taken on the fly
    row1 = 0;
    column1 = 0;
    for (int i=0; i<pixelsNumber; i++)
    {
        for (int j=0; j<pixelsNumber; j++)
        {
           row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8);
           column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
           cvmSet(trainClassesH,i,0,I[row1][column1].h);
           cvmSet(trainClassesS,i,0,I[row1][column1].s);
           cvmSet(trainClassesV,i,0,I[row1][column1].v);
           
        }
    }
    //order the water samples in ascending order to determine a range
    cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING);
    cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING);
    cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING);
    // find the maximum and minimum values in the array to create a range
    int maxH = cvmGet(waterTrainingHue,0,0);
    int maxS = cvmGet(waterTrainingSat,0,0);
    int maxV = cvmGet(waterTrainingVal,0,0);
    int minH = cvmGet(waterTrainingHue,0,0);
    int minS = cvmGet(waterTrainingSat,0,0);
    int minV = cvmGet(waterTrainingVal,0,0);
    for (int i=0; i < threeArraySize; i++)
    {
        for (int j=0; j < arraySize; j++)
        {
            if (cvmGet(waterTrainingHue,i,j) > maxH)
                maxH = cvmGet(waterTrainingHue,i,j);
            if (cvmGet(waterTrainingSat,i,j) > maxS)
                maxS = cvmGet(waterTrainingSat,i,j);
            if (cvmGet(waterTrainingVal,i,j) > maxV)
                maxV = cvmGet(waterTrainingVal,i,j);
            if (cvmGet(waterTrainingHue,i,j) < minH)
                minH = cvmGet(waterTrainingHue,i,j);
            if (cvmGet(waterTrainingSat,i,j) < minS)
                minS = cvmGet(waterTrainingSat,i,j);
            if (cvmGet(waterTrainingVal,i,j) < minV)
                minV = cvmGet(waterTrainingVal,i,j);
        }
    }
    //cout << "Min Value in the range: " << endl;
    //cout << minH << endl;
    //cout << minS << endl;
    //cout << minV << endl;
    //cout << "Max Value in the range: " << endl;
    //cout << maxH << endl;
    //cout << maxS << endl;
    //cout << maxV << endl << endl;
    /*********** Main loop. It traverses through the picture **********/

    /**********************************************************************/
    //Ignore unused parts of the image and convert them to black
    //for (int i=0; i<boatFront->height/1.45 - 1; i++)
    //{
    //    for (int j=0; j<Y-1; j++)
    //    {
    //        I[i][j].h = 0;
    //        I[i][j].s = 0;
    //        I[i][j].v = 0;
    //    }
    //}
    
    /*********************************************************************
    // Use nearest neighbors to increase accuracy
    skyX = 0; 
    skyY = 0;
    while (x < X-1)
    {
        //get a random sample taken from the picture. Must be determined whether
        //it is water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        //Find the shortest distance between a pixel and the neighbors from each of
        //the training samples (sort of inefficient, but might do the job...sometimes)
        //HSV for water sample
        // learn classifier
        //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber);
        CvKNearest knnWaterHue(waterTrainingHue, trainClassesH, 0, false, pixelsNumber);
        CvKNearest knnWaterSat(waterTrainingSat, trainClassesS, 0, false, pixelsNumber);
        CvKNearest knnWaterVal(waterTrainingVal, trainClassesV, 0, false, pixelsNumber);
        //HSV for ground sample
        //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber);
        //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber);
        //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber);
        //HSV for sky sample
        //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0)
        //{
        //    CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber);
        //    CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber);
        //    CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber);
        //}

        //scan nearest neighbors to each pixel
        responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0);
        responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0);
        responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0);
        //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0);
        //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0);
        //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0);
        //for (int i=0;i<pixelsNumber;i++)
        //{
            for (int j=0;j<pixelsNumber;j++)
            {
                if ((nearestWaterH->data.fl[j] == responseWaterH) )//&& (nearestWaterH->data.fl[j] == responseWaterH + 5))
                        // mark water samples as green
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((nearestWaterS->data.fl[j] == responseWaterS) )//&& (nearestWaterS->data.fl[j] < responseWaterS + 5))
                    //mark water samples as green
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((nearestWaterV->data.fl[j] == responseWaterV) )//&& (nearestWaterV->data.fl[j] < responseWaterV + 5))
                //mark water samples as green
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                // similar sky pixels on the water
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3]; 
                if (votesSum > 1)
                {
                    I[x][y-6+j].h = 0;
                    I[x][y-6+j].s = 255;
                    I[x][y-6+j].v = 255;
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 use to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 use to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
       // ix = 0;
    }
    
    /*********************************************************************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/1.45;
    y = 0;
    while (x < X-1)
    {
        //take a sample of pixels from the picture; it must be determined
        //whether it is water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j)))
                    //mark water samples as green
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((minS < cvmGet(sampleSat,0,j)) && (maxS > cvmGet(sampleSat,0,j)))
                //mark water samples as green
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((minV < cvmGet(sampleVal,0,j)) && (maxV > cvmGet(sampleVal,0,j)))
                //mark water samples as red
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3];
                if (votesSum > 1)
                {
                    //use the known water samples as new training data
                    if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor))
                    {
                        cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j));
                        cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j));
                        cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j));
                    }
                    //6 used to be equal to pixelsNumber.
                    I[x][y-6+j].h = 0;
                    I[x][y-6+j].s = 255;
                    I[x][y-6+j].v = 255;   
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
        //ix = 0;
    }
    
    /***************Deal with reflection*****************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/1.45;
    y = 0;
    while (x < X-1)
    {
        //take a sample of pixels from the picture; it must be determined
        //whether it is water or ground
        for (int i = 0; i<6;i++)
        {
            column1 = y+i;
            if (column1 > Y-1)
                column1 = Y-1;
            cvmSet(sampleHue,0,i,I[x][column1].h);
            cvmSet(sampleSat,0,i,I[x][column1].s);
            cvmSet(sampleVal,0,i,I[x][column1].v);
        }
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j)))
                    //mark water samples as green
                    comparator[0] = 1;
                else
                    comparator[0] = 0;
                if ((0.8*255 > cvmGet(sampleSat,0,j)))// && (maxS < cvmGet(sampleSat,0,j)))
                //mark water samples as green
                    comparator[1] = 1;
                else
                    comparator[1] = 0;
                if ((0.6*255 < cvmGet(sampleVal,0,j)))// || (maxV < cvmGet(sampleVal,0,j)))
                //mark water samples as green
                    comparator[2] = 1;
                else
                    comparator[2] = 0;
                //count votes
                for (int i3=0; i3 < 3; i3++)
                    votesSum = votesSum + comparator[i3]; 
                if (votesSum > 1)
                {
                    //use the known water samples as new training data
                    if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor))
                    {
                        cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j));
                        cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j));
                        cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j));
                    }
                    //6 used to be equal to pixelsNumber.
                    I[x][y-6+j].h = 0;
                    I[x][y-6+j].s = 255;
                    I[x][y-6+j].v = 255;   
                }
                votesSum = 0;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
        //ix = 0;
    }
    
    /**********Resample the entire patch**********/
    /*********find a new min and max for a new sample range*************/
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/1.45;
    y = 0;
    maxH = cvmGet(resampleHue,0,0);
    maxS = cvmGet(resampleSat,0,0);
    maxV = cvmGet(resampleVal,0,0);
    minH = cvmGet(resampleHue,0,0);
    minS = cvmGet(resampleSat,0,0);
    minV = cvmGet(resampleVal,0,0);
    for (int i=0; i < boatFront->height/xDivisor; i++)
    {
        for (int j=0; j < boatFront->width/yDivisor; j++)
        {
            if (cvmGet(resampleHue,i,j) > maxH)
                maxH = cvmGet(resampleHue,i,j);
            if (cvmGet(resampleSat,i,j) > maxS)
                maxS = cvmGet(resampleSat,i,j);
            if (cvmGet(resampleVal,i,j) > maxV)
                maxV = cvmGet(resampleVal,i,j);
            if (cvmGet(resampleHue,i,j) < minH)
                minH = cvmGet(resampleHue,i,j);
            if (cvmGet(resampleSat,i,j) < minS)
                minS = cvmGet(resampleSat,i,j);
            if (cvmGet(resampleVal,i,j) < minV)
                minV = cvmGet(resampleVal,i,j);
        }
    }
    while (x < X-1)
    {
        for (int i=0;i<6;i++)
        {
            for (int j=0;j<6;j++)
            {
                if ((minH < I[x][y-6+j].h) && (maxH > I[x][y-6+j].h))
                    //mark water samples as red
                    I[x][y-6+j].h = 0;
                else
                    comparator[0] = 0;
                if ((minS < I[x][y-6+j].s) && (maxS > I[x][y-6+j].s))
                //mark water samples as red
                    I[x][y-6+j].s = 255;
                else
                    comparator[1] = 0;
                if ((minV < I[x][y-6+j].v) && (maxV > I[x][y-6+j].v))
                //mark water samples as red
                    I[x][y-6+j].v = 255;
            }
        }
        if (y < Y-1)
            //5 used to be equal to pixelsNumber-1.
            y = y + 5;
        if (y > Y-1)
            y = Y-1;
        else if (y == Y-1)
        {
            //5 used to be equal to pixelsNumber-1
            x = x + 1;
            y = 0;
        }
    }
    //cout << "Sample data from current images" << endl;
    //for (int i = 0; i<20;i++)
    //{
      //  cout << "HUE: " << cvmGet(sampleHue,0,i) << endl;
      //  cout << "Saturation: " << cvmGet(sampleSat,0,i) << endl;
      //  cout << "Value: " << cvmGet(sampleVal,0,i) << endl;
    //}
    //traverse through the image one more time, divide the image in grids of
    // 500x500 pixels, and see how many pixels of water are in each grid. If
    // most of the pixels are labeled water, then mark all the other pixels
    // as water as well
    //int counter = 0;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/1.45;
    y = 0;
    
    /***************Divide the picture in cells for filtering**********/
    while (x < X-1)
    {
        //take a sample of pixels from the picture; it must be determined
        //whether it is water or ground
        for (int i = 0; i < boatFront->height/xDivisor; i++)
        {
            for(int j = 0; j < boatFront->width/yDivisor; j++)
            {
                cvmSet(resampleHue2,i,j,I[x+i][y+j].h);
                cvmSet(resampleSat2,i,j,I[x+i][y+j].s);
                cvmSet(resampleVal2,i,j,I[x+i][y+j].v);
                if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > (((boatFront->height/xDivisor)*(boatFront->width/yDivisor))*(4.0/5.0)))
        {
        // if more than 4/5 of the total number of pixels in a cell are water, then consider the entire cell as water
        // (4.0/5.0 must stay floating point; the integer expression 4/5 evaluates to 0)
        // We might need to use other smaller quantities (like 5/6 maybe?)
            for (int i = 0; i < boatFront->height/xDivisor;i++)
            {
                for (int j = 0; j < boatFront->width/yDivisor; j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 255;
                    I[row1][column1].v = 255;
                }
            }
        }
        else
        {   
        // If not water, eliminate all red pixels and turn those pixels
        // back to the original color
            for (int i = 0; i < boatFront->height/xDivisor;i++)
            {
                for (int j = 0; j < boatFront->width/yDivisor; j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = IBackUp[row1][column1].h;//255;//IBackUp[row1][column1].h;
                    I[row1][column1].s = IBackUp[row1][column1].s;//255;//IBackUp[row1][column1].s;
                    I[row1][column1].v = IBackUp[row1][column1].v;//255;//IBackUp[row1][column1].v;
                }
            }
        }
        y = y + boatFront->width/yDivisor;
        if (y > Y-1)
        {
            x = x + boatFront->height/xDivisor;
            y = 0;
        }
        votesSum = 0;
    }
    
    /********************Isolate obstacles************************/
    votesSum = 0;
    int paint = 0;
    column1 = 0;
    row1 = 0;
    x = boatFront->height/1.45;
    y = 0;
    xDiv = 20;
    yDiv = 20;
    /***************Divide the picture in cells for filtering**********/
    // Small pixel areas (noise) are going to be eliminated from the picture,
    // leaving only the big obstacles
    while (x < X-2)
    {
        //take a sample of pixels from the picture; it must be determined
        //whether it is water or ground
        for (int i = 0; i < boatFront->height/xDiv; i++)
        {
            for(int j = 0; j < boatFront->width/yDiv; j++)
            {
                row1 = x + i;
                if (row1 > X-2)
                    row1 = X-2;
                column1 = y+j;
                if (column1 > Y-1)
                    column1 = Y-1;
                cvmSet(resampleHue2,i,j,I[row1][column1].h);
                cvmSet(resampleSat2,i,j,I[row1][column1].s);
                cvmSet(resampleVal2,i,j,I[row1][column1].v);
                if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > (((boatFront->height/xDiv)*(boatFront->width/yDiv))*(4.5/5)))
        {
        // if more than 4.5/5 of the total number of pixels in a cell are water, then consider the entire cell as water
        // We might need to use other smaller quantities (like 5/6 maybe?)
            for (int i = 0; i < boatFront->height/xDiv;i++)
            {
                for (int j = 0; j < boatFront->width/yDiv; j++)
                {
                    row1 = x + i;
                    if (row1 > X-2)
                        row1 = X-2;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 255;
                    I[row1][column1].v = 255;
                }
            }
        }
        else
        {   
            int count = 0;
        // If not water, eliminate all red pixels and turn those pixels
        // back to the original color
            for (int i = 0; i < boatFront->height/xDiv;i++)
            {
                for (int j = 0; j < boatFront->width/yDiv; j++)
                {
                    row1 = x + i;
                    if (row1 > X-2)
                        row1 = X-2;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    I[row1][column1].h = IBackUp[row1][column1].h;//255;
                    I[row1][column1].s = IBackUp[row1][column1].s;//255;
                    I[row1][column1].v = IBackUp[row1][column1].v;//255;
                   // count++;
                }
            }
        }
        y = y + boatFront->width/yDiv;
        if (y > Y-1)
        {
            x = x + boatFront->height/xDiv;
            if (x > X-2)
                x = X-2;
            y = 0;
        }
        votesSum = 0;
    }
    
    /****************Find Obstacles boundaries*********************************/
    if( grayStorage == NULL )
    {
        grayStorage = cvCreateMemStorage(0);
    } 
    else 
    {
        cvClearMemStorage(grayStorage);
    }
    cvReleaseImage(&backUpImage);          //release the first clone before re-cloning
    backUpImage = cvCloneImage(boatFront);
    //convert from HSV to RGB
    cvCvtColor(boatFront, boatFront, CV_HSV2BGR);
    cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR);
    //do flood fill for obstacles
    cvFloodFill( backUpImage, seed_point, color, cvScalarAll(255), cvScalarAll(2), NULL, 8, NULL);
    //convert to gray to do more obstacle segmentation
    cvCvtColor(backUpImage, grayImage, CV_BGR2GRAY);
    //convert to binary
    cvThreshold(grayImage, bwImage, 100, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    //eliminate small unnecessary pixel areas
    //bwImage is modified in place, so the return value of bwareaopen_ is not reused
    int findCountours = bwareaopen_(bwImage, 100);
    //find contours of obstacles in image
    cvFindContours(bwImage, grayStorage, &contours);
    cvZero( bwImage );                      //redraw clean contours
    for( CvSeq* c=contours; c!=NULL; c=c->h_next)
    {
        cvDrawContours(bwImage, c, cvScalarAll(255), cvScalarAll(255), 8);
        cout << "Contour area: " << cvContourArea(c, CV_WHOLE_SEQ) << endl;     //area in pixels                        
        //find the x,y coordinate of the center of a contour
        cvMoments(c, &moment, 0);
        //centroid/moment of the contour/obstacle
        //cout << "Contour center: " << moment.m10/moment.m00 << ", " << moment.m01/moment.m00 << endl;
        //The distance formula calculated by plotting points is given by:
        /***********  distance = 0.1622208546*pow(1.0186851612,pixels)  *****************/
        /***********  pixel = 87.0413255*pow(distance,0.4062956891) *****************/
        //These formulas only work for 640X480 images
        // x,y coordinates of the obstacle from the bottom center of the image
        //Ignore everything less than 0.3 meters apart (anything too close to the boat)
        //if ((X - (row1 -(boatFront->height/xDiv)/2)) > (87.0413255*pow(0.3,0.4062956891)))
        //{
            xObstacleDistance = 0.1622208546*pow(1.0186851612,X - (moment.m10/moment.m00));
            if (xObstacleDistance == 0.0)      //try to ignore obstacles that are too close;
                xObstacleDistance = 0.01;      //the robot shall tell the operator if there is
                                               //a problem with a nearby obstacle
            yObstacleDistance = 0.1622208546*pow(1.0186851612,Y/2 - (moment.m01/moment.m00));
            //obstacle distance
            obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2));
            //obstacle heading in degrees
            obstacleHeading = atan(yObstacleDistance/xObstacleDistance)*180/PI;
            cout << "Obstacle polar coordinates: " << endl;
            cout << "x: " << xObstacleDistance << " Y: " << yObstacleDistance << endl;
            cout << "Distance (meters) " << obstacleDistance << endl;
            cout << "Direction (degrees): " << obstacleHeading << endl << endl;
        //}
    }
  /**************************************************************************/
  try
  {
    //fprintf(stderr,"\n boatFront\n");
    cvShowImage("Boat Front", boatFront);
    //cvShowImage("Color Segment", backUpImage);
    //cvShowImage("Obstacles", bwImage);
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
  }
  //release the per-callback images and contour storage to avoid leaking memory on every frame
  cvReleaseImage(&backUpImage);
  cvReleaseImage(&grayImage);
  cvReleaseImage(&bwImage);
  cvReleaseMemStorage(&grayStorage);
}
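The empirical calibration quoted in the comments above (distance = 0.1622208546 * 1.0186851612^pixels, stated to hold only for 640x480 images) is easy to factor out and sanity-check. A hedged sketch with illustrative names, not taken from the original source:

#include <cmath>

// Convert a pixel offset, measured from the bottom center of a 640x480 frame,
// into an approximate metric distance using the empirical fit quoted above.
static double pixelsToMeters(double pixels)
{
    return 0.1622208546 * pow(1.0186851612, pixels);
}

// Example use for an obstacle centroid, following the offsets used above:
//   double dx = pixelsToMeters(X - cx);             // forward component
//   double dy = pixelsToMeters(Y/2 - cy);           // lateral component
//   double range   = sqrt(dx*dx + dy*dy);           // straight-line distance
//   double heading = atan2(dy, dx) * 180.0 / M_PI;  // degrees off center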
Example #3
void face_detector::face_marker_lbp(Mat *frame)
{
    static vector<Rect> faces;
    static Mat frame_gray;
    cvtColor(*frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    static String face_cascade_name = "lbpcascade_frontalface.xml";
    static String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
    static CascadeClassifier face_cascade;
    static CascadeClassifier eyes_cascade;
    static bool loaded = false;
    if (!loaded) {
        if(!face_cascade.load(face_cascade_name)) {
            fprintf(stderr, "Could not load face classifier cascade!");
            exit(1);
        }
        if(!eyes_cascade.load( eyes_cascade_name)) {
            fprintf(stderr, "Could not load eye classifier cascade!");
            exit(1);
        }
    }
    loaded = true;
    static Cgt_Rect _area;
    static Rect largestFace;
    static Mat smallImgROI;
    static Rect eyeArea;
    static Cgt_Eye _iris_point;
    static Rect leftEyeRect(0,0,0,0), rightEyeRect(0,0,0,0);
    static vector<Rect> eyes;
    static Point left_top, right_bottom;

    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0, Size(40, 40));
    if(faces.empty() == false) {
        //pick the largest face
        largestFace.width=0;
        largestFace.height=0;
        for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++) {
            if ((r->width * r->height) > (largestFace.width * largestFace.height))
                largestFace = *r;
        }

        // assign the largest face region to _area
        _area.left  = largestFace.x;
        _area.right  = largestFace.x + largestFace.width;
        _area.top  = largestFace.y;
        _area.bottom = largestFace.y + largestFace.height;

        /////////// detect eyes within the largest face ////////////////////////////////

        eyeArea = largestFace;
        eyeArea.height = eyeArea.height/1.2; //detect eyes only in the upper part of the face to reduce the error rate //parameter tuned: restricting to the top half alone sometimes fails to detect
        smallImgROI = (*frame)(eyeArea);
        eyes_cascade.detectMultiScale(smallImgROI, eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        if(eyes.size()>=2) { //at least two eyes must be detected
            vector<Rect>::const_iterator nr = eyes.begin();
            leftEyeRect = *nr;
            nr++;
            rightEyeRect = *nr;
        } else {
            //fprintf(stderr, "At least two eyes must be detected!\n");
            return;
        }

        _iris_point.xleft = cvRound(largestFace.x + leftEyeRect.x + leftEyeRect.width*0.5);   //x coordinate of the left eye center
        _iris_point.yleft = cvRound(largestFace.y + leftEyeRect.y + leftEyeRect.height*0.5);  //y coordinate of the left eye center
        _iris_point.xright = cvRound(largestFace.x + rightEyeRect.x + rightEyeRect.width*0.5);  //x coordinate of the right eye center
        _iris_point.yright = cvRound(largestFace.y + rightEyeRect.y + rightEyeRect.height*0.5); //y coordinate of the right eye center
        //post-validation of the detected eyes:
        //the left eye must not be to the right of the right eye
//        if(iris_point.xleft >= iris_point.xright )
//            fprintf(stderr, "11111111111"), nRetCode = false;
        //the eyes must not lie on the boundary (the values are initialized to 0, so this
        //also means detection is considered failed if fewer than two eyes were found)
        if ((_iris_point.xleft==0) || (_iris_point.yleft==0) ||(_iris_point.xright==0) || (_iris_point.yright==0) )
            return;
        //the two eyes must not be tilted too much vertically (this also filters some false detections)
        if (abs(_iris_point.yright-_iris_point.yleft) > (largestFace.width/3) )
            return;
        //the horizontal distance between the eyes must not be less than 1/4 of the face width (this also filters some false detections)
        if (abs(_iris_point.xright-_iris_point.xleft) < (largestFace.width/4) )
            return;

        //draw the detected face, for verification and debugging
        left_top.x = _area.left;
        left_top.y = _area.top;
        right_bottom.x = _area.right;
        right_bottom.y = _area.bottom;
        rectangle(*frame, left_top, right_bottom, CV_RGB(0,255,0), 2, 8, 0);
        left_top.x = _iris_point.xleft;
        left_top.y = _iris_point.yleft;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(*frame, left_top, right_bottom, CV_RGB(90, 255, 0), 2, 8, 0);
        left_top.x = _iris_point.xright;
        left_top.y = _iris_point.yright;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(*frame, left_top, right_bottom, CV_RGB(0, 255, 0), 2, 8, 0);
    }
}
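The three post-detection checks above are simple geometric predicates on the eye centers. A compact restatement, assuming Cgt_Eye holds integer center coordinates as used above (the helper name is illustrative):

#include <cstdlib>
#include <opencv2/core/core.hpp>

// Accept an eye pair only when both centers were found, the pair is not
// tilted more than a third of the face width vertically, and the eyes are
// at least a quarter of the face width apart (the thresholds used above).
static bool eyesPlausible(const Cgt_Eye &p, const cv::Rect &face)
{
    if (p.xleft == 0 || p.yleft == 0 || p.xright == 0 || p.yright == 0)
        return false;                       // an eye defaulted to the boundary
    if (abs(p.yright - p.yleft) > face.width / 3)
        return false;                       // too much vertical tilt
    if (abs(p.xright - p.xleft) < face.width / 4)
        return false;                       // eyes implausibly close together
    return true;
}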
Example #4
Mat faceDetection(Mat &videoFrame, double scaleFactor, int scaledWidth, bool eyesDetect, bool mouthDetect, CascadeClassifier &mFaceDetector, CascadeClassifier &mMouthDetector, CascadeClassifier &mEyeDetector, QString personName ) {
    Mat defaultFace;
    Mat grayscaleFace;
    Mat equalizedFace;
    Mat resizedFace;
    Mat savedFace;

    const double CHANGE_IMAGE = 0.5; //0.3;     // Change in face from last time
    const double CHANGE_TIME = 0.5;  //1.0;     // Time change old - new

/*grayscale image*/
    videoFrame.copyTo( defaultFace );

     if (defaultFace.channels() == 3) {
         cvtColor(defaultFace, grayscaleFace, CV_BGR2GRAY);
     }
     else if (defaultFace.channels() == 4) {
         cvtColor(defaultFace, grayscaleFace, CV_BGRA2GRAY);
     }
     else {
         grayscaleFace = defaultFace;
     }

/* shrunken image */
     float scale = defaultFace.cols / (float)scaledWidth;
     if (defaultFace.cols > scaledWidth) {
         int scaledHeight = cvRound(defaultFace.rows / scale);
         resize(grayscaleFace, resizedFace, Size(scaledWidth, scaledHeight));
     }
     else {
         resizedFace = grayscaleFace;
     }

/* face detection */
     vector< cv::Rect > faceVec;
     mFaceDetector.detectMultiScale( resizedFace, faceVec, scaleFactor );


     for( size_t i=0; i<faceVec.size(); i++ ) {
          cv::rectangle( defaultFace, faceVec[i], CV_RGB(255,200,0), 2 );
          cv::Mat face = resizedFace( faceVec[i] );
          // EyesDetection
          if( eyesDetect == 1 ) {
                vector< cv::Rect > eyeVec;
                mEyeDetector.detectMultiScale(face, eyeVec);
                for( size_t j=0; j<eyeVec.size(); j++ ) {
                    cv::Rect rect = eyeVec[j];
                    rect.x += faceVec[i].x;
                    rect.y += faceVec[i].y;
                    //saveFace(face);
                    cv::rectangle(resizedFace, rect, CV_RGB(0,255,0), 2 );
                }
            }
         // MouthDetection
         /* If a mouth is detected in the lower half of the face, the face is
            equalized, resized to a standard size, and saved, provided it differs
            from the face saved last time. */
         if( mouthDetect == 1 ) {
                vector< cv::Rect > mouthVec;
                Rect halfRect = faceVec[i];
                halfRect.height /= 2;
                halfRect.y += halfRect.height;
                Mat halfFace = videoFrame( halfRect );
                mMouthDetector.detectMultiScale( halfFace, mouthVec, 3 );
                for( size_t j=0; j<mouthVec.size(); j++ ) {
                    cv::Rect rect = mouthVec[j];
                    rect.x += halfRect.x;
                    rect.y += halfRect.y;

                    Mat equalizedImg;
                    equalizeHist(face, equalizedImg);

                    Mat face_resized;
                    resize(equalizedImg, face_resized, Size(faceSize, faceSize), 1.0, 1.0, INTER_CUBIC);

                    double imageDiff = 10000000000.0;
                    if (oldFace.data) {
                        imageDiff = norm(face_resized, oldFace, NORM_L2)/(double)(face_resized.rows * face_resized.cols);
                    }



                    double current_time = (double)getTickCount();
                    double timeDiff_seconds = (current_time - oldTime)/getTickFrequency();
                    cout << "imageDiff " << imageDiff << "  and timeDiff " << timeDiff_seconds <<  "  and oldtime " << oldTime << "  and currentTime " <<current_time << "\n";

                    if ((imageDiff > CHANGE_IMAGE) && (timeDiff_seconds > CHANGE_TIME)) {

                            saveFace(face_resized, personName);
                            qDebug() << "Photo was saved";
                            cv::rectangle( defaultFace, rect, CV_RGB(255,255,255), 2 );

                            Mat displayedFaceRegion = defaultFace( faceVec[i]);
                            displayedFaceRegion += CV_RGB(90,90,90);

                            oldFace = face_resized.clone();
                            oldTime = current_time;

                }
            }
        }
     }

     //return after all detected faces have been processed
     return defaultFace;
}
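The save gate in this function combines a per-pixel L2 difference against the previously saved face with a minimum time gap, so near-duplicate frames are skipped. A standalone sketch of that test under the same thresholds (names are illustrative, not from the original source):

#include <opencv2/opencv.hpp>

// Decide whether a newly normalized face differs enough from the last saved
// one, both in appearance and in time, to be worth saving again.
static bool shouldSaveFace(const cv::Mat &newFace, const cv::Mat &oldFace,
                           double oldTime,
                           double changeImage = 0.5, double changeTime = 0.5)
{
    double imageDiff = 1e10;    // no previous face counts as "very different"
    if (oldFace.data)
        imageDiff = cv::norm(newFace, oldFace, cv::NORM_L2)
                    / (double)(newFace.rows * newFace.cols);
    double timeDiff = ((double)cv::getTickCount() - oldTime)
                      / cv::getTickFrequency();
    return imageDiff > changeImage && timeDiff > changeTime;
}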
Example #5
static GstFlowReturn
gst_motiondetect_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstMessage *m = NULL;
  StbtMotionDetect *filter = GST_MOTIONDETECT (trans);
  if ((!filter) || (!buf)) {
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK(filter);
  if (filter->enabled && filter->state != MOTION_DETECT_STATE_INITIALISING) {
    IplImage *referenceImageGrayTmp = NULL;
    static int frameNo = 1;

    filter->cvCurrentImage->imageData = (char *) GST_BUFFER_DATA (buf);
    cvCvtColor( filter->cvCurrentImage,
        filter->cvCurrentImageGray, CV_BGR2GRAY );

    if (filter->debugDirectory) {
      gst_motiondetect_log_image (filter->cvCurrentImageGray, 
          filter->debugDirectory, frameNo, "source.png");
    }

    if (filter->state == MOTION_DETECT_STATE_REFERENCE_IMAGE_ACQUIRED) {
      gboolean result;
      
      result = gst_motiondetect_apply(
          filter->cvReferenceImageGray, filter->cvCurrentImageGray,
          filter->cvMaskImage, filter->noiseThreshold);

      if (filter->debugDirectory) {
        if (result) {
          gst_motiondetect_log_image (filter->cvReferenceImageGray, 
              filter->debugDirectory, frameNo,
              "absdiff_not_masked_motion.png");
        } else {
          gst_motiondetect_log_image (filter->cvReferenceImageGray, 
              filter->debugDirectory, frameNo,
              "absdiff_not_masked_no_motion.png");
        }
        gst_motiondetect_log_image (filter->cvMaskImage,
              filter->debugDirectory, frameNo, "mask.png");
      }

      GstStructure *s = gst_structure_new ("motiondetect",
          "has_motion", G_TYPE_BOOLEAN, result,
          "masked", G_TYPE_BOOLEAN, (filter->mask != NULL),
          "mask_path", G_TYPE_STRING, filter->mask, NULL);
      m = gst_message_new_element (GST_OBJECT (filter), s);

      if (filter->display) {
        buf = gst_buffer_make_writable (buf);
        cvSubS (filter->cvCurrentImage, CV_RGB(100, 100, 100),
            filter->cvCurrentImage, filter->cvInvertedMaskImage);
        if (result) {
          cvAddS (filter->cvCurrentImage, CV_RGB(50, 0, 0),
              filter->cvCurrentImage, filter->cvMaskImage);
        }
      }
    }

    referenceImageGrayTmp = filter->cvReferenceImageGray;
    filter->cvReferenceImageGray = filter->cvCurrentImageGray;
    filter->cvCurrentImageGray = referenceImageGrayTmp;
    filter->state = MOTION_DETECT_STATE_REFERENCE_IMAGE_ACQUIRED;
    ++frameNo;
  }
  GST_OBJECT_UNLOCK(filter);

  if (m) {
    gst_element_post_message (GST_ELEMENT (filter), m);
  }

  return GST_FLOW_OK;
}
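gst_motiondetect_apply itself is not shown in this excerpt. As an assumption about the general technique rather than the element's actual implementation, a frame-difference test in the same OpenCV C-API style might look like:

#include <glib.h>
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

/* Hedged sketch: report motion when any pixel difference between the
 * reference and current grayscale frames survives a noise threshold
 * (assumed here to be a 0..1 fraction) and an optional region mask. */
static gboolean
motion_between (IplImage *ref_gray, IplImage *cur_gray,
                IplImage *mask, double noise_threshold)
{
  IplImage *diff = cvCreateImage (cvGetSize (ref_gray), IPL_DEPTH_8U, 1);
  gboolean motion;

  cvAbsDiff (ref_gray, cur_gray, diff);             /* per-pixel |ref - cur| */
  cvThreshold (diff, diff, 255 * noise_threshold, 255, CV_THRESH_BINARY);
  if (mask)
    cvAnd (diff, mask, diff, NULL);                 /* keep masked-in area only */
  motion = cvCountNonZero (diff) > 0;
  cvReleaseImage (&diff);
  return motion;
}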
Example #6
/*
 * -d 0,1,2,3,4 =>/dev/video0,video1,...
 * -f static file
 * 
 */
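/*
 * Hedged usage sketch (the binary name and exact flag handling are
 * illustrative; parsing is done by option(), which is not shown here):
 *     ./contours -d 0          capture live from /dev/video0
 *     ./contours -f <file>     process a static image file instead
 */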
int main (int argc, char * argv[]) {
	//Two boolean variables
	char quit = 0; //Exit main program loop?
	char grab_frame = 1; //Do we grab a new frame from the camera?
	
	int thresh1=DEFAULT_TRACKBAR_VAL, thresh2=DEFAULT_TRACKBAR_VAL; //These two variables will hold trackbar positions.

	if(argc==1){
		camID=0;
		printf("set default camera 0\n");
	}else if(option(argc,argv) < 0 ){
		printf( "Wrong args!!!\n");
		exit(EXIT_FAILURE);
	}

	//These are pointers to IPL images, which will hold the result of our calculations
	IplImage *small_image = NULL; /*cvCreateImage(cvSize(IMG_WIDTH,IMG_HEIGHT),IPL_DEPTH_8U,3)*/; //size, depth, channels (RGB = 3)
	IplImage *small_grey_image = NULL;/*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //1 channel for greyscale
	IplImage *edge_image = NULL; /*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //We use cvGetSize to make sure the images are the same size. 
	
	//CvMemStorage and CvSeq are structures used for dynamic data collection. CvMemStorage contains pointers to the actual
	//allocated memory, but CvSeq is used to access this data. Here, it will hold the list of image contours.
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = 0;
	
	CvCapture *camera=NULL;
	if(camID>=0){
		camera = cvCreateCameraCapture(camID); //This function tries to connect to the first (0th) camera it finds and creates a structure to refer to it.
		if(!camera){ //cvCreateCameraCapture failed, most likely there is no camera connected.
			printf("Could not find a camera to capture from...\n"); //Notify the user...
			return -1; //And quit with an error.
		}
	}
	cvNamedWindow(MAIN_WINNAME, CV_WINDOW_AUTOSIZE); //Here we create a window and give it a name. The second argument tells the window to not automatically adjust its size.
	
	//We add two trackbars (sliders) to the window. These will be used to set the parameters for the Canny edge detection.
	cvCreateTrackbar("Thresh1", MAIN_WINNAME, &thresh1, 256, 0);
	cvCreateTrackbar("Thresh2", MAIN_WINNAME, &thresh2, 256, 0);
	
	//Set the trackbar position to the default value. 
	cvSetTrackbarPos("Thresh1", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL); //Trackbar name, window name, position
	cvSetTrackbarPos("Thresh2", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL);
	
	//Now set the mouse callback function. We need to pass the location of the contours so that it will be able to access this information.
	cvSetMouseCallback(MAIN_WINNAME, onMouse, &contours); //Window name, function pointer, user-defined parameters.
	
	IplImage *frame=NULL; //This will point to the IPL image we will retrieve from the camera.
	IplImage *frame_org=NULL;
	if(camID < 0){
		frame_org = cvLoadImage(orgFile, CV_LOAD_IMAGE_UNCHANGED);
		frame = cvLoadImage(inFile, CV_LOAD_IMAGE_UNCHANGED);
	}
	//This is the main program loop. We exit the loop when the user sets quit to true by pressing the "esc" key
	while(!quit){
		int c =0;
		if(camID >= 0){
			c=cvWaitKey(30); //Wait 30 ms for the user to press a key.
			
			//Respond to key pressed.
			switch(c){
				case 32: //Space
					grab_frame = !grab_frame; //Reverse the value of grab_frame. That way, the user can toggle by pressing the space bar.
					break;
				case 27: //Esc: quit application when user presses the 'esc' key.
					quit = 1; //Get out of loop
					break;
			};

			//If we don't have to grab a frame, we're done for this pass.
			if(!grab_frame)continue;	

			//Grab a frame from the camera.
			frame = cvQueryFrame(camera);
			
			if(!frame) continue; //Couldn't get an image, try again next time.
		}
		
		//In computer vision, it's always better to work with the smallest images possible, for faster performance.
		//cvResize will use inter-linear interpolation to fit frame into small_image.
		if(small_image==NULL){
			//note: created at the full frame size here, so the cvResize below is effectively a copy
			small_image = cvCreateImage(cvSize(frame->width,frame->height),IPL_DEPTH_8U,3); //size, depth, channels (RGB = 3)
			printf("w=%d, h=%d\n", frame->width,frame->height);
		}
		if(small_grey_image==NULL) small_grey_image=cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);
		if(edge_image==NULL) edge_image = cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);

		cvResize(frame, small_image, CV_INTER_LINEAR);
		
		//Many computer vision algorithms do not use colour information. Here, we convert from RGB to greyscale before processing further.
		cvCvtColor(small_image, small_grey_image, CV_RGB2GRAY);

		//We then detect edges in the image using the Canny algorithm. This will return a binary image, one where the pixel values will be 255 for 
		//pixels that are edges and 0 otherwise. This is unlike other edge detection algorithms like Sobel, which compute greyscale levels.
		cvCanny(small_grey_image, edge_image, (double)thresh1, (double)thresh2, 3); //We use the threshold values from the trackbars and set the window size to 3
		
		//The edges returned by the Canny algorithm might have small holes in them, which will cause some problems during contour detection.
		//The simplest way to solve this problem is to "dilate" the image. This is a morphological operator that will set any pixel in a binary image to 255 (on) 
		//if it has one neighbour that is not 0. The result of this is that edges grow fatter and small holes are filled in.
		//We re-use small_grey_image to store the results, as we won't need it anymore.
		cvDilate(edge_image, small_grey_image, 0, 1);
		
		//Once we have a binary image, we can look for contours in it. cvFindContours will scan through the image and store connected contours in "storage".
		//"contours" will point to the first contour detected. CV_RETR_TREE means that the function will create a contour hierarchy. Each contour will contain 
		//a pointer to contours that are contained inside it (holes). CV_CHAIN_APPROX_NONE means that all the contours points will be stored. Finally, an offset
		//value can be specified, but we set it to (0,0).
		cvFindContours(small_grey_image, storage, &contours, sizeof(CvContour), CV_RETR_TREE, 
					   CV_CHAIN_APPROX_NONE, cvPoint(0,0));
		CvContour *max_contour = LargestContour(&contours);
		//This function will display contours on top of an image. We can specify different colours depending on whether the contour is a hole or not.
		cvDrawContours(small_image, contours, CV_RGB(255,0,0), CV_RGB(0,255,0), MAX_CONTOUR_LEVELS, 
					   1, CV_AA, cvPoint(0,0));
		
		//Finally, display the image in the window.
		cvShowImage(MAIN_WINNAME, small_image);
		if(camID< 0){
			//draw the max contour on the original
			cvDrawContours(frame_org, (CvSeq *)max_contour, CV_RGB(0, 255,0), CV_RGB(0,200,0), 
						0, 1, CV_AA, cvPoint(roi_x,roi_y));		
			cvShowImage(orgFile, frame_org);
			c=cvWaitKey(0); //Wait indefinitely for the user to press a key.
			//Respond to key pressed.
			if(c == 27) quit = 1; //Get out of loop
		}
	}
	
	//Clean up before quitting.
	cvDestroyAllWindows(); //This function releases all the windows created so far.
	cvReleaseCapture(&camera); //Release the camera capture structure.
	
	//release memory
	cvReleaseMemStorage(&storage);
	
	//Release images
	cvReleaseImage(&small_image); //We pass a pointer to a pointer, because cvReleaseImage will set the image pointer to 0 for us.
	cvReleaseImage(&small_grey_image);
	cvReleaseImage(&edge_image);
	if( (camID < 0) && frame ){
		cvReleaseImage(&frame);
	}
	if( (camID < 0) && frame_org ){
		cvReleaseImage(&frame_org); //also release the original image loaded in file mode
	}
	return 0; //We're done.
}
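
A note on the dilation step above: cvDilate with a NULL element uses the default 3x3 box. A minimal sketch, assuming the same OpenCV 1.x C API and the variables from the example, of closing larger gaps in the Canny edges with a custom structuring element before cvFindContours:

/* Hedged sketch: a 5x5 elliptical element closes bigger holes than the
   default 3x3 box, at the cost of slightly fatter contours. */
IplConvKernel *kernel = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);
cvDilate(edge_image, small_grey_image, kernel, 1);
cvReleaseStructuringElement(&kernel);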
Example #7
File: main.cpp  Project: kyyang28/opencv
int main( int argc, char** argv )
{
    const char *pstrWindowsSrcTitle = "Original picture";
    const char *pstrWindowsOutLineTitle = "Outline picture";

    const int IMAGE_WIDTH = 400;
    const int IMAGE_HEIGHT = 200;

    // Create the image
    IplImage *pSrcImage = cvCreateImage(cvSize(IMAGE_WIDTH, IMAGE_HEIGHT), IPL_DEPTH_8U, 3);

    // Fill it with white
    cvRectangle(pSrcImage, cvPoint(0, 0), cvPoint(pSrcImage->width, pSrcImage->height), CV_RGB(255, 255, 255), CV_FILLED);

    // Draw the circles
    CvPoint ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    int nRadius = 80;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 0), CV_FILLED);
    ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    nRadius = 30;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 255), CV_FILLED);

    // Draw the rectangles
    CvPoint ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 20, 20);
    CvPoint ptRightBottom = cvPoint(IMAGE_WIDTH - 20, IMAGE_HEIGHT - 20);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(0, 255, 255), CV_FILLED);
    ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 60, 40);
    ptRightBottom = cvPoint(IMAGE_WIDTH - 60, IMAGE_HEIGHT - 40);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(255, 255, 255), CV_FILLED);

    // Show the original image
    cvNamedWindow(pstrWindowsSrcTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsSrcTitle, pSrcImage);

    // Convert to a grayscale image
    IplImage *pGrayImage =  cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 1);
    cvCvtColor(pSrcImage, pGrayImage, CV_BGR2GRAY);

    // Convert to a binary image
    IplImage *pBinaryImage = cvCreateImage(cvGetSize(pGrayImage), IPL_DEPTH_8U, 1);
    cvThreshold(pGrayImage, pBinaryImage, 250, 255, CV_THRESH_BINARY);


    // Find the contours; the return value is the number of contours detected
    CvMemStorage *pcvMStorage = cvCreateMemStorage();
    CvSeq *pcvSeq = NULL;
    cvFindContours(pBinaryImage, pcvMStorage, &pcvSeq, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    // Draw the contour image
    IplImage *pOutlineImage = cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 3);
    int nLevels = 5;

    // Fill it with white
    cvRectangle(pOutlineImage, cvPoint(0, 0), cvPoint(pOutlineImage->width, pOutlineImage->height), CV_RGB(255, 255, 255), CV_FILLED);
    cvDrawContours(pOutlineImage, pcvSeq, CV_RGB(255,0,0), CV_RGB(0,255,0), nLevels, 2);

    // Show the contour image
    cvNamedWindow(pstrWindowsOutLineTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsOutLineTitle, pOutlineImage);

    cvWaitKey(0);

    cvReleaseMemStorage(&pcvMStorage);

    cvDestroyWindow(pstrWindowsSrcTitle);
    cvDestroyWindow(pstrWindowsOutLineTitle);
    cvReleaseImage(&pSrcImage);
    cvReleaseImage(&pGrayImage);
    cvReleaseImage(&pBinaryImage);
    cvReleaseImage(&pOutlineImage);

    return 0;
}
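
The comment before cvFindContours above notes that it returns the number of contours detected, but the example discards the return value. A minimal sketch of capturing it, using the same variables:

// Hedged sketch: cvFindContours returns the number of extracted contours.
int nContours = cvFindContours(pBinaryImage, pcvMStorage, &pcvSeq, sizeof(CvContour),
                               CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
printf("detected %d contours\n", nContours);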
Example #8
File: mean.cpp  Project: mdqyy/surfmst
// Mean Shift Algorithm
void meanShift(Mat& img1, Mat& img2, Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilterType, vector<KeyPoint>& tpkeypoints,
               Mat& tpdescriptors, vector<KeyPoint>& keypoints2, Mat& descriptors2, Mat& clusters1, Point2f &cp, int& flag,
               vector<Point2f>& MP1, Mat& img2ROI, vector<KeyPoint>& bkeypoints, Mat& bdescriptors, Mat& temp,
               int& FG_mp, int&FG, int& BG_mp, int& BG, int& FG_BG, int& msI)
{
    size_t i,j;
    // Mat temp=img2.clone();
    int converged = 0, semiConverged = 0;
    Mat img1ROI, labels1_;
    Point2f lastCenter(box.x+box.width/2,box.y+box.height/2);
    float scale = 1;

    img1ROI = img1(boxOrg);
    vector<Point2f> points1, bmp;
    KeyPoint::convert(tpkeypoints, points1);
    searchBin(tpdescriptors, clusters1, labels1_); // clustering based on k-means centers obtained earlier

//vector<Point2f> np;
//    Mat newimg = img1ROI.clone();
//     KeyPoint::convert(tpkeypoints, np);
//    for(size_t i=0;i<np.size();i++)
//        circle(newimg, np[i], 2, Scalar(255,0,255),2);

//     imshow( "msimg", newimg );
//    np.clear();

//    waitKey(0);


    vector<float> prevPDF(NOC); // pdf of source
    weightedPDF( points1, boxOrg, labels1_, NOC, prevPDF); // Making histogram/pdf by normalizing the data and applying weights based on positions

    // Iterations for finding the object

    Point2f zBCmax(0,0); // center corresponding to the iteration with max BC
    float BCmax=0, BCprev=0, BCnow=0; // Bhattacharyya coefficient: max value for an image, previous and current value for an iteration
    int stopCount=0; // MS iterations must converge before stopCount reaches MAX_MS_ITER
    vector<Point2f> matchedPoints1, matchedPoints2;
    while ( !converged )
    {
       // ofstream tempF;
        //tempF.open("tempF.txt", ios::out);
        matchedPoints1.clear(); matchedPoints2.clear();

        Mat matchedDesc1, matchedDesc2;
        vector<int> queryIdxs, trainIdxs;

        cv::Rect expandedBox;

#ifdef DEBUG
        cout << "iteration in while = \t" << ++iter << endl;
#endif

        if (EXPANDED_BOX)
        {
            expandedBox = Rect(box.x-25, box.y-25, box.width+50, box.height+50);
            boundaryCheckRect(expandedBox);
            img2ROI = img2(expandedBox);
        }
        else
        {
          //  cout << box.br() << "\t" << box.tl() << "\t" << img2.cols << endl;
            img2ROI = img2(box);
        }

        vector<KeyPoint> tempkey;
       // Mat pointsTransed21;
        MP1.clear();

        doIteration(img1ROI, img2ROI, queryIdxs, trainIdxs,descriptorMatcher, matcherFilterType, tpkeypoints,tpdescriptors,
                    keypoints2,descriptors2, matchedDesc1, matchedDesc2,  matchedPoints1, matchedPoints2, MP1,tempkey);

        if(matchedPoints2.size() < 1)
        {
            FG=0; BG=0; FG_mp=0; BG_mp=0; FG_BG=0; msI=0;
            break;
        }
        //  mdescriptors = matchedDesc2;

        //   KeyPoint::convert(keypoints2, points2);


        if (EXPANDED_BOX)
            shiftPoints(matchedPoints2, expandedBox);
        else
            shiftPoints(matchedPoints2, box);


        // shiftPoints(matchedPoints1,boxOrg);
        vector<float> predPDF(NOC,0);

        Mat labels2, labels2_; // depending on PDF_OF_WHOLE
        Point2f z(0,0);


        //==================== Edited at 8th april =======================//
        bmp.clear();
        Mat tmatchedDesc2, tmatchedDesc1;
        vector<Point2f> tmatchedPoints2;
        msI = stopCount;
        FG_mp = matchedPoints2.size();

        vector<KeyPoint> tempbk;
        Mat tempBd;
        vector<DMatch> filteredMatches;
        crossCheckMatching( descriptorMatcher, bdescriptors, descriptors2, filteredMatches, 1 );
        trainIdxs.clear();    queryIdxs.clear();

        for( i = 0; i < filteredMatches.size(); i++ )
        {
            queryIdxs.push_back(filteredMatches[i].queryIdx);
            trainIdxs.push_back(filteredMatches[i].trainIdx);
        }


        vector<Point2f> points1; KeyPoint::convert(bkeypoints, points1, queryIdxs);
        vector<Point2f> points2;   KeyPoint::convert(keypoints2, points2, trainIdxs);
        vector<char> matchesMask( filteredMatches.size(), 0 );
        /////
        Mat H12;
        Mat drawImg;
        if (points2.size() < 4 )
        {
            cout << "backpoints less than 4, hence prev ROI is retained" << endl;
            // Collect whatever matches are available before keeping the previous ROI.
            for(i=0;i<points2.size();i++)
            {
                bmp.push_back( points2[i]);
                tempBd.push_back(bdescriptors.row(queryIdxs[i]));
                tempbk.push_back(keypoints2[trainIdxs[i]]);
                tempBd.push_back(descriptors2.row(trainIdxs[i]));
                tempbk.push_back(keypoints2[trainIdxs[i]]);
            }
            return;
        }
        else
        {
            H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD );

            if( !H12.empty() )
            {
                Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

                for( size_t i1 = 0; i1 < points1.size(); i1++ )
                {
                    double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1,0));
                    if(diff  <= 20)
                    {
                        matchesMask[i1]=1;
                        bmp.push_back( points2[i1]);
                        tempBd.push_back(bdescriptors.row(queryIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);
                        tempBd.push_back(descriptors2.row(trainIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);

                    }
                }

                drawMatches( img1ROI, bkeypoints, img2ROI, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                             , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                             );

            }
        }
        imshow("bm",drawImg);

        //============edit part ====
        shiftPoints(bmp, box);
        vector<int> bflag(bmp.size(),0); // zero-initialized: no back-point matched yet

        vector<int> ft(matchedPoints2.size(),0);

        for(i=0;i<matchedPoints2.size();i++)
        {
            ft[i]=0;
            for(j=0; j< bmp.size(); j++)
            {
                double diff = norm (matchedPoints2[i] - bmp[j]);
                // cout << diff << endl;
                if(diff < 0.5)
                {
                    bflag[j]=1;
                    ft[i]=1;
                    break;
                }
            }
            if(ft[i]==0)
            {
                tmatchedPoints2.push_back(matchedPoints2[i]);
                tmatchedDesc1.push_back(matchedDesc1.row(i));
                tmatchedDesc2.push_back(matchedDesc2.row(i));
            }

        }




        //=================================================================//


        // allot descriptors to the clusters to make histogram
        searchBin(tmatchedDesc1, clusters1, labels1_);
        searchBin( tmatchedDesc2, clusters1, labels2);
        if (PDF_OF_WHOLE)
            searchBin( descriptors2, clusters1, labels2_);

        // find the PDF for the above histogram as per weights
        if (PDF_OF_WHOLE)
            weightedPDF( points2, box, labels2_, NOC, predPDF);
        else
            weightedPDF( tmatchedPoints2, box, labels2, NOC, predPDF);

        // find weights for each IPoint as per the values of weighted PDFs
        vector<float> weights(labels2.rows,0);
        Mat imgTemp = img2.clone();
        findWeights( prevPDF, predPDF, labels1_, labels2, weights, queryIdxs, tmatchedDesc1, tmatchedDesc2);

        // find new ROI center as per above weights
        findNewCenter(tmatchedPoints2, weights, box, z);

        lastCenter = Point2f (box.x+box.width/2, box.y+box.height/2);

        // if current BC is less than previous BC, then take mean of the prev and current centers
        BCnow = findBC(predPDF,prevPDF);
        if (BCnow < BCprev)
            z = 0.5*(z+lastCenter);
        BCprev = BCnow;

        // check if ROI centers converge to same pixel
        if ( (norm(z - lastCenter) < 3))
        {
            semiConverged = 1;
            if (!SHOW_FINAL_ROI)
                rectangle(temp, box, Scalar(0,0,255),2);
        }
        else
        {
            // keep iterating
            stopCount++;

            if (stopCount >= MAX_MS_ITER)
            {
                semiConverged = 1;
                flag = 0;
                if (!SHOW_FINAL_ROI)
                    rectangle(temp, box, Scalar(0,0,255),2);
                z = zBCmax;
            }

            box.x = z.x - box.width/2;
            box.y = z.y - box.height/2;
            boundaryCheckRect(box);

            if (stopCount < MAX_MS_ITER)
                if (!SHOW_FINAL_ROI)
                    ;// rectangle(temp, box, Scalar(0,255,0), 2);
        }

        // store values of max BC and corresponding center z
        if ( BCnow > BCmax)
        {
            BCmax = BCnow;
            zBCmax = z;
        }

        if (semiConverged)
        {
            converged = 1;

            //   FG_mp, FG, BG_mp, BG, FG_BG, msI ;
            //==========edited on 5april ========
            bdescriptors.release();
            bkeypoints.clear();
            for(i=0;i<tempBd.rows;i++)
            {
                bdescriptors.push_back(tempBd.row(i));
                bkeypoints.push_back(tempbk[i]);

            }


            tpdescriptors.release();
            tpkeypoints.clear();
            //============================================//

            for(i=0;i<matchedPoints2.size();i++)
            {
                if(ft[i]==0)
                {
                    tpdescriptors.push_back(matchedDesc1.row(i));
                    tpkeypoints.push_back(tempkey[i]);

                    tpdescriptors.push_back(matchedDesc2.row(i));
                    tpkeypoints.push_back(tempkey[i]);

                }
            }


//=================================
            box.x = z.x - box.width/2;
            box.y = z.y - box.height/2;

           // imgTemp.release();

            trajectory << z.x << "\t" << z.y << "\t" << box.width << "\t" << box.height << endl;

            cp =z;


            cv::circle(temp, z, 3, Scalar(0,255,255), 3);
            cv::rectangle(temp, box, Scalar(255,0,0),2);

            cout << "MP1 \t" << MP1.size() <<"\t" << "bmp \t"  <<bmp.size() << endl;

            for(size_t i=0;i<matchedPoints2.size();i++)
            {//circle(temp, MP1[i], 3, Scalar(255,255,255),3);
              circle(temp, matchedPoints2[i], 3, Scalar(255,0,255),3);
            }

            // shiftPoints(bmp,box);
           for(size_t i=0;i<bmp.size();i++)
             { circle(temp, bmp[i], 2, Scalar(0,0,0),2);
              // cout << bmp[i] << endl;
           }

        }

        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting from while iterator..." << endl;
            break;
        }
    }




    cv::imshow("Iter", temp);
  //  waitKey(0);
    eachIter.close();

}
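
findBC above compares the source and candidate PDFs, but its body is not part of this excerpt. A minimal sketch, assuming normalized histograms, of the standard Bhattacharyya coefficient it presumably computes (the function name here is an assumption):

#include <algorithm>
#include <cmath>
#include <vector>

// BC(p,q) = sum_k sqrt(p_k * q_k): 1 for identical normalized histograms, 0 for disjoint ones.
static float bhattacharyyaCoeff(const std::vector<float>& p, const std::vector<float>& q)
{
    float bc = 0.f;
    const size_t n = std::min(p.size(), q.size());
    for (size_t k = 0; k < n; ++k)
        bc += std::sqrt(p[k] * q[k]);
    return bc;
}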
Example #9
File: mean.cpp  Project: mdqyy/surfmst
static void doIteration(Mat& img1, Mat& img2, vector<int>& queryIdxs, vector<int>& trainIdxs, Ptr<DescriptorMatcher>& descriptorMatcher,
                        int matcherFilter,vector<KeyPoint>& keypoints1,  Mat& descriptors1, vector<KeyPoint>& keypoints2,Mat& descriptors2,
                        Mat& matchedDesc1, Mat& matchedDesc2, vector<Point2f>& matchedPoints1, vector<Point2f>& matchedPoints2,
                        vector<Point2f>& MP1, vector<KeyPoint>& tempkey)
{
    assert( !img2.empty());
    cv::SURF mySURF;    mySURF.extended = 0;
    Mat H12;

    mySURF.detect(img2, keypoints2);    mySURF.compute(img2, keypoints2, descriptors2);
    vector<DMatch> filteredMatches;
    switch( matcherFilter )
    {
    case CROSS_CHECK_FILTER :
        crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
        break;
    default :
        simpleMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches );
    }

    trainIdxs.clear();    queryIdxs.clear();

    for( size_t i = 0; i < filteredMatches.size(); i++ )
    {
        queryIdxs.push_back(filteredMatches[i].queryIdx);
        trainIdxs.push_back(filteredMatches[i].trainIdx);
    }

    //////


    Mat mDesc1, mDesc2;
    for(size_t i=0; i<queryIdxs.size(); i++)
    {
        mDesc1.push_back(descriptors1.row(queryIdxs[i]));
        mDesc2.push_back(descriptors2.row(trainIdxs[i]));
    }


    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
    vector<char> matchesMask( filteredMatches.size(), 0 );//,  matchesMask2( filteredMatches.size(), 1 );;

    Mat drawImg;// drawImg2;

    cout << "points2.size \t" << points2.size() << endl;
    cout <<"HELLO \t" << endl;

    if( RANSAC_THREHOLD >= 0 )
    {
        if (points2.size() < 4 )
        {
            cout << "matchedPoints1 less than 4, hence prev ROI is retained" << endl;

            for(size_t i1=0;i1<points2.size();i1++)
            {
                matchesMask[i1] = 1;
                matchedPoints1.push_back( points1[i1]);
                matchedPoints2.push_back( points2[i1]);

                matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));

                tempkey.push_back(keypoints2[trainIdxs[i1]]);
                MP1.push_back(points2[i1]);
            }
        }
        else
        {
            H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD );

            if( !H12.empty() )
            {

                Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

                vector<Point2f> points2Shift(points2.size());
                points2Shift = points2;
                shiftPoints(points2Shift, box);
                Point2f boxCenter;
                boxCenter.x = box.x + box.width/2;
                boxCenter.y = box.y + box.height/2;

                for( size_t i1 = 0; i1 < points1.size(); i1++ )
                {
                    double descDiff = pow(norm(mDesc1.row(i1) - mDesc2.row(i1)) , 2);
                    //  if(descDiff < 0.08)
                    {
                        double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1,0));
                        if(diff  <= 30)
                        {
                          //  cout << diff << endl;
                            matchesMask[i1] = 1;
                            matchedPoints1.push_back( points1[i1]);
                            matchedPoints2.push_back( points2[i1]);

                            matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                            matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));

                            tempkey.push_back(keypoints2[trainIdxs[i1]]);
                            MP1.push_back(points2[i1]);
                        }
                    }

                }
            }
            //              drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg2, CV_RGB(255, 255, 0), CV_RGB(255,255, 255), matchesMask2
            //              #if DRAW_RICH_KEYPOINTS_MODE
            //                                   , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
            //              #endif
            //                                 );
            drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                         , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                         );

            cout << endl;
            imshow( "doiter", drawImg );

//            Mat newimg = img1.clone();
//             KeyPoint::convert(keypoints1, points1);
//            for(size_t i=0;i<points1.size();i++)
//                 circle(newimg, points1[i], 2, Scalar(255,0,255),2);

//             imshow( "doimg", newimg );
//            points1.clear();
// waitKey(0);

        }
    }
    // waitKey(0);

}
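
crossCheckMatching is called above with a knn of 1, but its body is not in this excerpt. A minimal sketch of the usual cross-check filter: keep a match only when the nearest-neighbour relation holds in both directions (the function name is an assumption, not the project's helper):

#include <vector>
#include <opencv2/features2d/features2d.hpp>

static void crossCheck1NN(cv::Ptr<cv::DescriptorMatcher>& matcher,
                          const cv::Mat& desc1, const cv::Mat& desc2,
                          std::vector<cv::DMatch>& out)
{
    std::vector<cv::DMatch> m12, m21;
    matcher->match(desc1, desc2, m12); // best match in desc2 for each row of desc1
    matcher->match(desc2, desc1, m21); // and the reverse direction
    for (size_t k = 0; k < m12.size(); ++k)
        if (m21[m12[k].trainIdx].trainIdx == m12[k].queryIdx)
            out.push_back(m12[k]); // both directions agree on the pair
}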
Example #10
void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
{
  CV_FUNCNAME("cvRenderTracks");
  __CV_BEGIN__;

  CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));

  if ((mode&CV_TRACK_RENDER_ID)&&(!font))
  {
    if (!defaultFont)
    {
      font = defaultFont = new CvFont;
      cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
      // Other fonts:
      //   CV_FONT_HERSHEY_SIMPLEX, CV_FONT_HERSHEY_PLAIN,
      //   CV_FONT_HERSHEY_DUPLEX, CV_FONT_HERSHEY_COMPLEX,
      //   CV_FONT_HERSHEY_TRIPLEX, CV_FONT_HERSHEY_COMPLEX_SMALL,
      //   CV_FONT_HERSHEY_SCRIPT_SIMPLEX, CV_FONT_HERSHEY_SCRIPT_COMPLEX
    }
    else
      font = defaultFont;
  }

  if (mode)
  {
    for (CvTracks::const_iterator it=tracks.begin(); it!=tracks.end(); ++it)
    {
      if (mode&CV_TRACK_RENDER_ID)
        if (!it->second->inactive)
        {
          stringstream buffer;
          buffer << it->first;
          cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(255.,255.,0.));
        }

      if (mode&CV_TRACK_RENDER_BOUNDING_BOX)
      {
        if (it->second->inactive)
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 50.));
        else
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 255., 0.));
      }

      if (mode&CV_TRACK_RENDER_TO_LOG)
      {
        clog << "Track " << it->second->id << endl;
        if (it->second->inactive)
          clog << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          clog << " - Associated with blob " << it->second->label << endl;
        clog << " - Lifetime " << it->second->lifetime << endl;
        clog << " - Active " << it->second->active << endl;
        clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        clog << endl;
      }

      if (mode&CV_TRACK_RENDER_TO_STD)
      {
        cout << "Track " << it->second->id << endl;
        if (it->second->inactive)
          cout << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          cout << " - Associated with blob " << it->second->label << endl;
        cout << " - Lifetime " << it->second->lifetime << endl;
        cout << " - Active " << it->second->active << endl;
        cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        cout << endl;
      }
    }
  }

  __CV_END__;
}
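
A hypothetical call site for this cvBlob-style renderer, OR-ing the mode flags tested inside the function (tracks and frame are assumed to exist in the caller):

// Hedged usage sketch: overlay track IDs and bounding boxes on the current frame.
cvRenderTracks(tracks, frame, frame,
               CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX, NULL);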
Example #11
void ImageViewer::displayMatches(QPainter& painter)const
{
	QPoint pt1, pt2;

	if (siftObj1.keypoints==NULL){
		printf("ERROR : Keypoints NULL\n");
		exit(-1);
	}
		
	if (dispMatch && lastComparison.tab_match!=NULL && !siftObj1.IsEmpty() ){
		// Display matches
		for (int i=0;i<lastComparison.nb_match;i++) {
			pt1.setX(ROUND(lastComparison.tab_match[i].x1));
			pt1.setY(ROUND(lastComparison.tab_match[i].y1));
			pt2.setX(ROUND(lastComparison.tab_match[i].x2));
			pt2.setY(ROUND(lastComparison.tab_match[i].y2 + siftObj1.im->height));
			
			painter.setBrush(Qt::white);
			if (lastComparison.tab_match[i].id==0)
				painter.setPen(Qt::red); //red for discarded matches
			else painter.setPen(Qt::green); //green
			
			painter.drawLine(pt1, pt2);
			painter.drawEllipse(pt1, 3, 3);
			painter.drawEllipse(pt2, 3, 3);
		}
	}
	
	#ifdef AAA
		
	//IplImage * im,* imcol;
	QSize s;
	//QPoint pt1, pt2;
	//CvScalar color;
	int i,j,im2null=0;
	Keypoint *k1 = siftObj1->keypoints;
	Keypoint *k2 = siftObj2->keypoints;
		/*Affine transform of the image border*/
		
		if (param.size_m()>0) {
			Matrice p1(2,1), p2(2,1), p3(2,1), p4(2,1), transl(2,1);
			transl.set_val(0,0,0);
			transl.set_val(1,0,im1->height);
			p1.set_val(0,0,0);
			p1.set_val(1,0,0);
			p2.set_val(0,0,im1->width);
			p2.set_val(1,0,0);
			p3.set_val(0,0,im1->width);
			p3.set_val(1,0,im1->height);
			p4.set_val(0,0,0);
			p4.set_val(1,0,im1->height);
			
			p1=Transform(p1,param)+transl;
			p2=Transform(p2,param)+transl;
			p3=Transform(p3,param)+transl;
			p4=Transform(p4,param)+transl;
			
			color=CV_RGB(0,128,255); //light blue
			pt1.x=ROUND(p1.get_val(0,0));
			pt1.y=ROUND(p1.get_val(1,0));
			pt2.x=ROUND(p2.get_val(0,0));
			pt2.y=ROUND(p2.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p2.get_val(0,0));
			pt1.y=ROUND(p2.get_val(1,0));
			pt2.x=ROUND(p3.get_val(0,0));
			pt2.y=ROUND(p3.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p3.get_val(0,0));
			pt1.y=ROUND(p3.get_val(1,0));
			pt2.x=ROUND(p4.get_val(0,0));
			pt2.y=ROUND(p4.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p4.get_val(0,0));
			pt1.y=ROUND(p4.get_val(1,0));
			pt2.x=ROUND(p1.get_val(0,0));
			pt2.y=ROUND(p1.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			
			/* Draw the border of the object */
			CvMemStorage *storage= cvCreateMemStorage (0); /* Memory used by openCV */
			int header_size = sizeof( CvContour );
			CvSeq *contours;
			
			IplImage* imthres = cvCreateImage(cvSize(im1->width,im1->height),IPL_DEPTH_8U, 1 );
			cvCopy( im1, imthres, 0 );
			
			/* First find the contour of a thresholded image*/
			
			cvThreshold(imthres, imthres, border_threshold, 255, CV_THRESH_BINARY );
			cvFindContours ( imthres, storage, &contours, header_size, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
			
			/* For each contour found*/
			
			while ( contours != NULL) {
				double area=fabs(cvContourArea(contours,CV_WHOLE_SEQ)); // compute area
				if ( area > 20) {
					for (int i=0;i<contours->total;i++) {
						
						/* Compute transform of contour*/
						
						CvPoint* cvpt=(CvPoint*)cvGetSeqElem( contours, i);
						p1.set_val(0,0,cvpt->x);
						p1.set_val(1,0,cvpt->y);
						p1= Transform(p1,param) + transl;
						cvpt->x=ROUND(p1.get_val(0,0));
						cvpt->y=ROUND(p1.get_val(1,0));
					}
					//                cvDrawContours( imcol, contours, CV_RGB(0,0,255),CV_RGB(255,0,0),0,2,8);
					cvDrawContours( imcol, contours, CV_RGB(0,0,255),CV_RGB(0,0,255),0,2,8);
				}
				contours = contours->h_next; // advance to the next contour in the list
			}
			cvReleaseMemStorage( &storage ); // also frees the contour sequences, so no free() is needed
			
	}
	#endif
}
static void computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){
	if(DEBUG){
		std::cout << "-- VECTOR COMPUTING" << std::endl;		
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size 640x480
	int i, idx1 = last, idx2;
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	//--SURF CORNERS--
	if(DEBUG){
		std::cout << "--- SURF CORNERS" << std::endl;
	}
        color = CV_RGB(0,255,0);
        CvMemStorage* storage2 = cvCreateMemStorage(0);
        CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1);
        CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
        cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params );
        if(DEBUG){
			printf("Image Descriptors: %d\n", imageDescriptors->total);
		}
        for( int j = 0; j < imageKeypoints->total; j++ ){
            CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j );
			center.x = cvRound(r->pt.x);
            center.y = cvRound(r->pt.y);
			if(DEBUG){
				printf("j: %d \t", j);               
				printf("total: %d \t", imageKeypoints->total);               
				printf("valor hessiano: %f \t", r->hessian);
				printf("x: %d \t", center.x);
				printf("y: %d \n", center.y);
			}
			// Add the point in the region we are interested in
			cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 );
			// Fill the matrix with the vectors
			relevancePointToVector(center.x, center.y, wROI, hROI, 5);
		}
	//--SURF CORNERS


	// calculate motion gradient orientation and valid orientation mask
	cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
	
	// Compute Motion on 4x4 Cuadrants
	if(DEBUG){
		std::cout << "--- MOTION CUADRANTS" << std::endl;
	}
	i = 25;
	color = CV_RGB(255,0,0);
	magnitude = 30;
	for (int r = 0; r < size.height; r += hROI){
		for (int c = 0; c < size.width; c += wROI){
			comp_rect.x = c;
			comp_rect.y = r;
			comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI;
			comp_rect.height = (r + hROI > size.height) ? (size.height - r) : hROI;

			cvSetImageROI( mhi, comp_rect );
			cvSetImageROI( orient, comp_rect );
			cvSetImageROI( mask, comp_rect );
			cvSetImageROI( silh, comp_rect );
			cvSetImageROI( img, comp_rect );

			// Process Motion
			angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
			angle = 360.0 - angle;  // adjust for images with top-left origin
			count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
			roi = cvGetImageROI(mhi);
			center = cvPoint( (comp_rect.x + comp_rect.width/2),
					  (comp_rect.y + comp_rect.height/2) );
			cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
			cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
			cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );	
			
			if(DEBUG){
				std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi
			}
			cvResetImageROI( mhi );
			cvResetImageROI( orient );
			cvResetImageROI( mask );
			cvResetImageROI( silh );
			cvResetImageROI(img);
			relevanceDirectionToVector(i, angle);
			++i;
		}
	}

	// Compute Global Motion
	if(DEBUG){
		std::cout << "--- MOTION GLOBAL" << std::endl;
	}
	comp_rect = cvRect( 0, 0, size.width, size.height );
	color = CV_RGB(255,255,255);
	magnitude = 100;
	angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
	angle = 360.0 - angle;  // adjust for images with top-left origin
	count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
	roi = cvGetImageROI(mhi);
	center = cvPoint( (comp_rect.x + comp_rect.width/2),
			  (comp_rect.y + comp_rect.height/2) );
	cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
	cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
	cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
	if(DEBUG){
		std::cout << "Motion Main-> x: " << roi.x << " y: " << roi.y << " count: " << count << std::endl; // print the roi
	}
	relevanceDirectionToVector(50, angle);
	cvReleaseMemStorage( &storage2 ); // release the SURF keypoint storage allocated each call
}
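
computeVectors reads the globals silh, mhi, orient and mask but never updates the motion history itself. A minimal sketch, assuming the usual OpenCV motion-template pipeline, of the per-frame update it relies on (prevGray and currGray are hypothetical single-channel frame buffers):

double ts = (double)clock() / CLOCKS_PER_SEC;
cvAbsDiff(prevGray, currGray, silh);                // motion silhouette by frame differencing
cvThreshold(silh, silh, 30, 1, CV_THRESH_BINARY);   // binarize the difference
cvUpdateMotionHistory(silh, mhi, ts, MHI_DURATION); // stamp new motion, decay old history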
Example #13
void ObjectTester::RunVideoDemo()
{
	GenericObjectDetector detector;

	visualsearch::io::OpenCVCameraIO cam;
	if( !cam.InitCamera() )
		return;
	//KinectDataMan kinectDM;

	//if( !kinectDM.InitKinect() )
		//return;

	bool doRank = true;
	// start fetching stream data
	while(1)
	{
		Mat cimg, dmap;
		//kinectDM.GetColorDepth(cimg, dmap);
		cam.QueryNextFrame(visualsearch::io::STREAM_COLOR, cimg);

		// resize image
		Size newsz;
		ToolFactory::compute_downsample_ratio(Size(cimg.cols, cimg.rows), 300, newsz);
		resize(cimg, cimg, newsz);
		//resize(dmap, dmap, newsz);
		//normalize(dmap, dmap, 0, 255, NORM_MINMAX);

		// get objects
		vector<ImgWin> objwins, salwins;
		if( !detector.ProposeObjects(cimg, dmap, objwins, salwins, doRank) )
			continue;

		//////////////////////////////////////////////////////////////////////////
		// draw best k windows
		int topK = MIN(6, (int)objwins.size());
		int objimgsz = newsz.height / topK;
		int canvas_h = newsz.height;
		int canvas_w = newsz.width + 10 + objimgsz*2;
		Mat canvas(canvas_h, canvas_w, CV_8UC3);
		canvas.setTo(Vec3b(0,0,0));
		// get top windows
		vector<Mat> detimgs(topK);
		vector<Mat> salimgs(topK);
		for (int i=0; i<topK; i++)
		{
			cimg(objwins[i]).copyTo(detimgs[i]);
			resize(detimgs[i], detimgs[i], Size(objimgsz, objimgsz));
			cimg(salwins[i]).copyTo(salimgs[i]);
			resize(salimgs[i], salimgs[i], Size(objimgsz, objimgsz));
		}

		// draw boxes on input
		Scalar objcolor(0, 255, 0);
		Scalar salcolor(0, 0, 255);
		for(int i=0; i<topK; i++)
		{
			//rectangle(cimg, objwins[i], objcolor);
			rectangle(cimg, salwins[i], salcolor);
		}
		circle(cimg, Point(cimg.cols/2, cimg.rows/2), 2, CV_RGB(255,0,0));
		// copy input image
		cimg.copyTo(canvas(Rect(0, 0, cimg.cols, cimg.rows)));

		// copy subimg
		for (int i=0; i<detimgs.size(); i++)
		{
			Rect objbox(cimg.cols+10, i*objimgsz, objimgsz, objimgsz);
			detimgs[i].copyTo(canvas(objbox));
			Rect salbox(cimg.cols+10+objimgsz, i*objimgsz, objimgsz, objimgsz);
			salimgs[i].copyTo(canvas(salbox));
		}

		resize(canvas, canvas, Size(canvas.cols*2, canvas.rows*2));
		//if(doRank)
			//putText(canvas, "Use Ranking", Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, CV_RGB(255, 0, 0));
		imshow("object proposals", canvas);
		if( waitKey(10) == 'q' )
			break;
	}
}
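
ToolFactory::compute_downsample_ratio is not shown in this excerpt. A plausible sketch of what it computes, scaling the frame so its larger dimension becomes the given limit while preserving aspect ratio (the helper name and behavior here are assumptions):

#include <algorithm>
#include <opencv2/core/core.hpp>

static void downsampleToMaxDim(const cv::Size& in, int maxdim, cv::Size& out)
{
    double r = (double)maxdim / std::max(in.width, in.height);     // shrink factor
    out = cv::Size(cvRound(in.width * r), cvRound(in.height * r)); // rounded target size
}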
Example #14
static void putDistance(IplImage *Image,
                        std::vector<cv_tracker::image_rect_ranged> objects,
                        int threshold_height,
                        const char* objectLabel)
{
  char distance_string[32];
  CvFont dfont;
  float hscale	    = 0.7f;
  float vscale	    = 0.7f;
  float italicscale = 0.0f;
  int	thickness   = 1;

  CvFont      dfont_label;
  float       hscale_label = 0.5f;
  float       vscale_label = 0.5f;
  CvSize      text_size;
  int         baseline     = 0;

  cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
  cvGetTextSize(objectLabel,
                &dfont_label,
                &text_size,
                &baseline);

  for (unsigned int i=0; i<objects.size(); i++)
    {
      if (objects.at(i).rect.y > threshold_height) // temporary way to avoid drawing detections in the sky
        {
          if (!isNearlyNODATA(objects.at(i).range))
            {
              /* put label */
              CvPoint labelOrg = cvPoint(objects.at(i).rect.x - OBJ_RECT_THICKNESS,
                                         objects.at(i).rect.y - baseline - OBJ_RECT_THICKNESS);

              cvRectangle(Image,
                          cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                          cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                          CV_RGB(0, 0, 0), // label background is black
                          -1, 8, 0
                          );
              cvPutText(Image,
                        objectLabel,
                        labelOrg,
                        &dfont_label,
                        CV_RGB(255, 255, 255) // label text color is white
                        );

              /* put distance data */
              cvRectangle(Image,
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 45),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 5),
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) + (((int)log10(objects.at(i).range/100)+1) * 8 + 38),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 30),
                          cv::Scalar(255,255,255),
                          -1);

              cvInitFont (&dfont,
                          CV_FONT_HERSHEY_COMPLEX,
                          hscale,
                          vscale,
                          italicscale,
                          thickness,
                          CV_AA);

              sprintf(distance_string, "%.2f m", objects.at(i).range / 100); //unit of length is meter
              cvPutText(Image,
                        distance_string,
                        cvPoint(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 40),
                                objects.at(i).rect.y + objects.at(i).rect.height + 25),
                        &dfont,
                        CV_RGB(255, 0, 0));
            }
        }
    }
}
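
The width of the white distance box above is estimated from the digit count via log10. A hedged alternative is to measure the rendered string directly, as the function already does for the label (this assumes dfont has been initialized with cvInitFont first):

// Hedged sketch: size the background box from the actual text metrics.
CvSize dist_size;
int dist_baseline = 0;
cvGetTextSize(distance_string, &dfont, &dist_size, &dist_baseline);
// background rectangle: dist_size.width x (dist_size.height + dist_baseline) at the text origin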
/*!
  Initialize the display size, position and title.

  \param width, height : Width and height of the window.
  \param x, y : The window is set at position x,y (column index, row index).
  \param title : Window title.

*/
void
vpDisplayOpenCV::init(unsigned int width, unsigned int height,
                      int x, int y,
                      const char *title)
{
  this->width  = width;
  this->height = height;
  this->windowXPosition = x;
  this->windowYPosition = y;
  int flags = CV_WINDOW_AUTOSIZE;
  if (title != NULL)
  {
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[strlen(title) + 1] ;
    strcpy(this->title, title) ;
  }
  else{
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[50] ;
    sprintf(this->title,"Unnamed ViSP display <%02d>",count) ;
  }
  count++;
  /* Create the window*/
  window = cvNamedWindow( this->title, flags );
  cvMoveWindow( this->title, x, y );
  move = false;
  lbuttondown = false;
  mbuttondown = false;
  rbuttondown = false;
  lbuttonup = false;
  mbuttonup = false;
  rbuttonup = false;
  cvSetMouseCallback( this->title, on_mouse, this );
  /* Create background pixmap */
//   background = cvCreateImage(cvSize((int)width,(int)height),IPL_DEPTH_8U,3);
//
//   cvShowImage( this->title,background);

  col = new CvScalar[vpColor::id_unknown] ;

  /* Create color */
  vpColor pcolor; // Predefined colors
  pcolor = vpColor::lightBlue;
  col[vpColor::id_lightBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::blue;
  col[vpColor::id_blue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkBlue;
  col[vpColor::id_darkBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightRed;
  col[vpColor::id_lightRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::red;
  col[vpColor::id_red]    = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkRed;
  col[vpColor::id_darkRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGreen;
  col[vpColor::id_lightGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::green;
  col[vpColor::id_green]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGreen;
  col[vpColor::id_darkGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::yellow;
  col[vpColor::id_yellow] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::cyan;
  col[vpColor::id_cyan]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::orange;
  col[vpColor::id_orange] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::purple;
  col[vpColor::id_purple] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::white;
  col[vpColor::id_white]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::black;
  col[vpColor::id_black]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGray;
  col[vpColor::id_lightGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::gray;
  col[vpColor::id_gray]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGray;
  col[vpColor::id_darkGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;

  font = new CvFont;
  cvInitFont( font, CV_FONT_HERSHEY_PLAIN, 0.70f,0.70f);

  CvSize fontSize;
  int baseline;
  cvGetTextSize( "A", font, &fontSize, &baseline );
  fontHeight = fontSize.height + baseline;
  displayHasBeenInitialized = true ;
}
Example #16
File: mean.cpp  Project: mdqyy/surfmst
int main(int argc, char** argv)
{
    ofstream f1;
    f1.open("result.txt");
    size_t i,j;
    Point2f cp;
    cv::initModule_nonfree();
    vector<Point2f> MP1,MP2;
    vector<int> trainIdxs, queryIdxs;

    //Read Video File
    VideoCapture cap("video1.avi");
    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }



    VideoWriter writer("ms_tracking.avi",CV_FOURCC('D','I','V','3'),
                 10,cvSize(640,480),1);

    cv::SURF mySURF;    mySURF.extended = 0;
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "FlannBased" );
    int mactherFilterType = getMatcherFilterType( "CrossCheckFilter" );

    Mat frame,img1,img2;
    cap >> frame;
    if( frame.empty() )
        return -1;
    img1 = frame.clone() ;
    Mat temp,temp1;

    if(img1.empty())
    {
        cout << "Exiting as the input image is empty" << endl;
        return -1;
    }


    const char* name = "Initiate_ROI";
    box = cvRect(-1,-1,0,0);
    cvNamedWindow( name,1);
    cvSetMouseCallback( name, my_mouse_callback2);

    // Main loop
    while( 1 )
    {
        img1.copyTo(temp);

        if( drawing_poly)
        {

            for ( i=0; i < polyPoints.size(); i++)
                circle(temp, polyPoints[i], 2, Scalar(0,255,0), -1,8);
        }
        cv::imshow(name,temp) ;
        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
            break;
        if(poly_drawn)
            break;
    }

    //Read the polygon points from a text file

    FILE *f11;
    polyPoints.clear();
    IpolyPoints.clear();
    f11 = fopen("points.txt","r");
    Point a;
    for(int j=0;j<37;j++)
    {
        fscanf(f11,"%d",&(a.x));
        fscanf(f11,"%d",&(a.y));
        polyPoints.push_back(a);
        IpolyPoints.push_back(a);
    }
    fclose(f11);

    // Drawing the polygon (std::vector instead of a non-standard variable-length array)
    vector<Point> pointArr(polyPoints.begin(), polyPoints.end());
    const Point* pointsArray[1] = { &pointArr[0] };
    int nCurvePts[1] = { (int)polyPoints.size() };
    polylines(temp, pointsArray, nCurvePts, 1, 1, Scalar(0,255,0), 1);

    cout << polyPoints.size() << endl;
    box= boundingRect(polyPoints);

   //boxOrg = Rect(box.x-15, box.y-15, box.width+30, box.height+30);
   boxOuter = Rect(box.x-30, box.y-30, box.width+60, box.height+60);
    //box =boxOrg; // storing the initial selected Box, as "box" variable changes in consecutive matching
    boxP=box;
    Mat img1ROI, labels1, clusters1, descriptors,roidescriptors, descriptors1,bdescriptors, bmdescriptors;
    vector<int> reprojections; // number of reprojections per KP, size same as KP(incresing)
    vector<Point2f> points,points1,points2, Mpoints1,Mpoints2,bpoints,npoints1,npoints2; //bmpoints,tpoints;
    vector<KeyPoint> roikeypoints, bkeypoints,keypoints,keypoints1, keypoints2;


    draw_box(temp, box ); //Show InnerBox  - This is used by the Mean-Shift Tracker
    draw_box(temp,boxOuter); //Show OuterBox - This is used for removing background points
    bpoints.clear();

    //calculating keypoints and descriptors of the selected polygon in image roi
    //==============================================================================================//
    for(i=0;i<polyPoints.size();i++)
    {
        // cout << polyPoints[i] << endl; //
        polyPoints[i].x = polyPoints[i].x -boxOuter.x;
        polyPoints[i].y = polyPoints[i].y- boxOuter.y;
    }

    img1ROI = img1(boxOuter);
    points1.clear();
    mySURF.detect(img1ROI, roikeypoints);
    KeyPoint::convert(roikeypoints, points);
    mySURF.compute(img1ROI, roikeypoints, roidescriptors);

    bdescriptors.release();bkeypoints.clear();
    bcategorizePoints( points, bpoints,polyPoints, roikeypoints, roidescriptors, bkeypoints, bdescriptors);
    shiftPoints(bpoints,boxOuter);
    for(i=0;i<bpoints.size();i++)
        circle(temp, bpoints[i], 2, Scalar(0,255,0),2);

  vector<KeyPoint> tpkeypoints;    Mat tpdescriptors;
    categorizePoints( points, points1,polyPoints, roikeypoints, roidescriptors, tpkeypoints, tpdescriptors);

    shiftPoints(points1, boxOuter);
    for(i=0;i<points1.size();i++)
        circle(temp, points1[i], 2, Scalar(0,0,255),2);
    //====================================================================================================//
    points1.clear();
    Mat img2ROI;

  //  tpkeypoints = keypoints1;    tpdescriptors = descriptors1;
    cv::imshow(name,temp) ;
    imwrite("a.jpg",temp);
    cout << "BD_SIZE \t" << bdescriptors.rows << "\t" << "FD_SIZE \t"  << tpdescriptors.rows << endl;


//    Mat newimg = img1ROI.clone();
//     KeyPoint::convert(tpkeypoints, points1);
//    for(size_t i=0;i<points1.size();i++)
//         circle(newimg, points1[i], 2, Scalar(255,0,255),2);

//     imshow( "newimg", newimg );
//    points1.clear();

    waitKey(0);
    cvDestroyWindow( name );


    int FG_mp, FG, BG_mp, BG, FG_BG, msI; // foreground/background match counts and mean-shift iteration count
    struct timeval t1, t2;

    for(int l=0;;l++)
    {
        gettimeofday(&t1, NULL);
        cv::kmeans(tpdescriptors, NOC, labels1, TermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 50, 1.0 ), 1,
                   KMEANS_RANDOM_CENTERS, clusters1);

        cap >> frame;
        img2 = frame.clone() ;
        temp1 =frame.clone() ;

        if(img2.empty() )
        {
            cout << "Could not read the next frame" << endl;
            break;
        }

        int flag=1;
        Mpoints1.clear();
        Mat descriptors2;

        msI=0;

        meanShift(img1, img2, descriptorMatcher, mactherFilterType, tpkeypoints, tpdescriptors,keypoints2,descriptors2,
                  clusters1, cp, flag, MP1,img2ROI,bkeypoints, bdescriptors, temp1,FG_mp, FG, BG_mp, BG, FG_BG,msI);



        //==========scaling=================
        float scale=1;

       // cout <<"MP1size \t" << MP1.size() <<endl;

        if(APPLY_SCALING)
        {
            vector<DMatch> filteredMatches;

            if(descriptors1.rows > 4 && descriptors2.rows > 4)
            {
                crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );

                trainIdxs.clear();    queryIdxs.clear();

                for( i = 0; i < filteredMatches.size(); i++ )
                {
                    queryIdxs.push_back(filteredMatches[i].queryIdx);
                    trainIdxs.push_back(filteredMatches[i].trainIdx);
                }

                points1.clear(); points2.clear();
                KeyPoint::convert(keypoints1, points1, queryIdxs);
                KeyPoint::convert(keypoints2, points2, trainIdxs);
                //  cout << "point2size" << points2.size() << endl;

                //homography

                npoints1.clear();npoints2.clear();
                Mpoints1.clear();Mpoints2.clear();
                Mat H12, points1t;
                double ransacReprojThreshold = 10;
                if( ransacReprojThreshold >= 0  && points1.size() > 4)
                    H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold );
              vector<char> matchesMask( filteredMatches.size(), 0 );// NONmatchesMask( filteredMatches.size(), 0 );
                if( !H12.empty() )
               {

                    perspectiveTransform(Mat(points1), points1t, H12);

                    double maxInlierDist = 10;//ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;

                    for(i = 0; i < points1.size(); i++ )
                    {
                        if( norm(points2[i] - points1t.at<Point2f>((int)i,0)) <= 5)// maxInlierDist ) // inlier
                        {
                            matchesMask[i] = 1;
                            npoints2.push_back(points2[i]);
                            npoints1.push_back(points1[i]);
                        }
                    }



                    for(i=0; i<npoints2.size();i++)
                    {
                        for(j=0;j<MP1.size();j++)
                        {
                            double dist = norm(npoints2[i]-MP1[j]);
                            // cout <<"dist \t" <<dist << endl;
                            //  waitKey(0);
                            if(dist < 0.1)
                            {
                                Mpoints2.push_back(npoints2[i]);
                                Mpoints1.push_back(npoints1[i]);
                                break;
                            }

                        }
                    }



                }
                Mat drawImg;
                drawMatches( img1ROI, keypoints1, img2ROI, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                             , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                             );
                imshow( "correspondance", drawImg );
                cout << "npoints1.size \t" << Mpoints1.size() << "\t" << Mpoints2.size() << endl;
                if(Mpoints1.size() > 8)
                    weightScalingAspect(Mpoints1,Mpoints2,&scale);

            }

        }


        img1=img2;
        img1ROI = img2ROI;
        boxOrg =box;
        keypoints1 = keypoints2;
        descriptors1 =descriptors2;

        box.x += box.width/2;
        box.y += box.height/2;
        box.height = round(boxOrg.height *scale);
        box.width = round(( float(boxOrg.width)/float(boxOrg.height) ) * box.height);
        box.x -= box.width/2;
        box.y -= box.height/2;

        boundaryCheckRect(box);

        cout <<"SCALE \t" << scale << endl;

        gettimeofday(&t2, NULL);
       double diff = (double)((t2.tv_sec * 1000000 + t2.tv_usec) - (t1.tv_sec * 1000000 + t1.tv_usec));
       diff = diff/1000;
       cout << "Time taken in milliseconds \t" << diff << endl;
       // cout << tpdescriptors.rows << endl;
        //cout <<"BD \t" << bdescriptors.rows << endl;
        f1 <<  l << "\t" << FG_mp << "\t"   << BG_mp  << "\t"   << FG   << "\t"<< msI << "\n";
        cout << "l \t" << l << "\t" <<" msI \t"<< msI << endl;
        imshow("img2",temp1);
        writer << temp1;
         waitKey(0);




       // boxOrg = eBox;

        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            break;
        }

    }
    trajectory.close();

    return 0;
}
Example #17
// Draw the map
void Map::showMap()
{

    IplImage *img = 0; // the map image to draw on
    CvScalar rcolor; // drawing color
    
    IplImage *test; // a scratch image
    test = cvCreateImage (cvSize (400, 400), IPL_DEPTH_8U, 3);
    cvZero (test);
    for(int y=0;y<test->height;y++){
        for(int x=0;x<test->width;x++) {
            test->imageData[test->widthStep * y + x * 3]     = 255; //B
            test->imageData[test->widthStep * y + x * 3 + 1] = 255; //G
            test->imageData[test->widthStep * y + x * 3 + 2] = 255; //R
        }
    }
    
    // Allocate and initialize the image area
    img = cvCreateImage (cvSize (400, 400), IPL_DEPTH_8U, 3);
    cvZero (img);
    // Set the image background to white
    for(int y=0;y<img->height;y++){
        for(int x=0;x<img->width;x++) {
            img->imageData[img->widthStep * y + x * 3]     = 255; //B
            img->imageData[img->widthStep * y + x * 3 + 1] = 255; //G
            img->imageData[img->widthStep * y + x * 3 + 2] = 255; //R
        }
    }


	for(int i = 0; i < LINE_NUM; i++)
	{        
        wall.clear();
		wall = outliers;
		outliers.clear();
        // Draw the points ----- begin
        CvPoint testpoint;
        rcolor = CV_RGB (255,200,0);
        for(int j=0;j<inliers_s[i].size();j++)
        {
            int x = (int)(inliers_s[i][j].x);
            int y = (int)(inliers_s[i][j].y);
            testpoint = cvPoint(x, y);
            
            cvCircle(img, testpoint, 2, rcolor, -5);
        }
        // Draw the points ----- end
        
        
        wall.clear();
		wall = outliers;
		outliers.clear();
	}
    
    // Draw the lines ----- begin
    rcolor = CV_RGB (200,0,200);
    for(int i=0; i<LINE_NUM; i++)
    {
        if(i != LINE_NUM-1)
        {
            cvLine (img, p[i], p[i+1], rcolor, 2, CV_AA, 0);
        }
        else
        {
            cvLine (img, p[i], p[0], rcolor, 2, CV_AA, 0);

        }
    }
    // Draw the lines ----- end
    
    cvFlip(img, NULL, 0); // flip the image vertically
    cvNamedWindow ("Drawing", CV_WINDOW_AUTOSIZE);
    
    cvShowImage ("Drawing", img);
    
    cvSaveImage("/Users/ispd/Workspace/Xcode/pbl3/opencv_test/opencv_test/img.png",img);
    cvWaitKey (0);
    
    cvDestroyWindow ("Drawing");
    cvReleaseImage (&img);
 
    cvDestroyWindow ("testDrawing");
    cvReleaseImage (&test);
    
}
Example #18
void CvCalibFilter::DrawPoints(CvMat** dstarr) {
    int i, j;

    if (!dstarr) {
        assert(0);
        return;
    }

    if (latestCounts) {
        for (i = 0; i < cameraCount; i++) {
            if (dstarr[i] && latestCounts[i]) {
                CvMat dst_stub, *dst;
                int count = 0;
                bool found = false;
                CvPoint2D32f* pts = 0;

                GetLatestPoints(i, &pts, &count, &found);

                dst = cvGetMat(dstarr[i], &dst_stub);

                static const CvScalar line_colors[] = {
                    {{0, 0, 255}},
                    {{0, 128, 255}},
                    {{0, 200, 200}},
                    {{0, 255, 0}},
                    {{200, 200, 0}},
                    {{255, 0, 0}},
                    {{255, 0, 255}}
                };

                const int colorCount = sizeof(line_colors) / sizeof(line_colors[0]);
                const int r = 4;
                CvScalar color = line_colors[0];
                CvPoint prev_pt = { 0, 0};

                for (j = 0; j < count; j++) {
                    CvPoint pt;
                    pt.x = cvRound(pts[j].x);
                    pt.y = cvRound(pts[j].y);

                    if (found) {
                        if (etalonType == CV_CALIB_ETALON_CHESSBOARD) {
                            color = line_colors[(j/cvRound(etalonParams[0])) % colorCount];
                        } else {
                            color = CV_RGB(0, 255, 0);
                        }

                        if (j != 0) {
                            cvLine(dst, prev_pt, pt, color, 1, CV_AA);
                        }
                    }

                    cvLine(dst, cvPoint(pt.x - r, pt.y - r),
                           cvPoint(pt.x + r, pt.y + r), color, 1, CV_AA);

                    cvLine(dst, cvPoint(pt.x - r, pt.y + r),
                           cvPoint(pt.x + r, pt.y - r), color, 1, CV_AA);

                    cvCircle(dst, pt, r + 1, color, 1, CV_AA);

                    prev_pt = pt;
                }
            }
        }
    }
}
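
A hypothetical call site for the legacy CvCalibFilter API (calibFilter, view0 and view1 are assumptions; the views would normally have been pushed through the filter's etalon detection first):

// Hedged usage sketch: overlay the latest detected calibration points on each camera view.
CvMat *views[2] = { view0, view1 };
calibFilter.DrawPoints(views);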
Example #19
/* chain function
 * this function does the actual processing
 */
static GstFlowReturn
gst_template_match_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstTemplateMatch *filter;
  CvPoint best_pos;
  double best_res;
  GstMapInfo info;
  GstMessage *m = NULL;

  filter = GST_TEMPLATE_MATCH (parent);

  if ((!filter) || (!buf)) {
    return GST_FLOW_OK;
  }
  GST_LOG_OBJECT (filter, "Buffer size %u", (guint) gst_buffer_get_size (buf));

  buf = gst_buffer_make_writable (buf);
  gst_buffer_map (buf, &info, GST_MAP_READWRITE);
  filter->cvImage->imageData = (char *) info.data;

  GST_OBJECT_LOCK (filter);
  if (filter->cvTemplateImage && !filter->cvDistImage) {
    if (filter->cvTemplateImage->width > filter->cvImage->width) {
      GST_WARNING ("Template Image is wider than input image");
    } else if (filter->cvTemplateImage->height > filter->cvImage->height) {
      GST_WARNING ("Template Image is taller than input image");
    } else {

      GST_DEBUG_OBJECT (filter, "cvCreateImage (Size(%d-%d+1,%d) %d, %d)",
          filter->cvImage->width, filter->cvTemplateImage->width,
          filter->cvImage->height - filter->cvTemplateImage->height + 1,
          IPL_DEPTH_32F, 1);
      filter->cvDistImage =
          cvCreateImage (cvSize (filter->cvImage->width -
              filter->cvTemplateImage->width + 1,
              filter->cvImage->height - filter->cvTemplateImage->height + 1),
          IPL_DEPTH_32F, 1);
      if (!filter->cvDistImage) {
        GST_WARNING ("Couldn't create dist image.");
      }
    }
  }
  if (filter->cvTemplateImage && filter->cvDistImage) {
    GstStructure *s;

    gst_template_match_match (filter->cvImage, filter->cvTemplateImage,
        filter->cvDistImage, &best_res, &best_pos, filter->method);

    s = gst_structure_new ("template_match",
        "x", G_TYPE_UINT, best_pos.x,
        "y", G_TYPE_UINT, best_pos.y,
        "width", G_TYPE_UINT, filter->cvTemplateImage->width,
        "height", G_TYPE_UINT, filter->cvTemplateImage->height,
        "result", G_TYPE_DOUBLE, best_res, NULL);

    m = gst_message_new_element (GST_OBJECT (filter), s);

    if (filter->display) {
      CvPoint corner = best_pos;
      CvScalar color;
      if (filter->method == CV_TM_SQDIFF_NORMED
          || filter->method == CV_TM_CCORR_NORMED
          || filter->method == CV_TM_CCOEFF_NORMED) {
        /* Yellow growing redder as match certainty approaches 1.0.  This can
           only be applied with method == *_NORMED as the other match methods
           aren't normalized to be in range 0.0 - 1.0 */
        color = CV_RGB (255, 255 - pow (255, best_res), 32);
      } else {
        color = CV_RGB (255, 32, 32);
      }

      buf = gst_buffer_make_writable (buf);

      corner.x += filter->cvTemplateImage->width;
      corner.y += filter->cvTemplateImage->height;
      cvRectangle (filter->cvImage, best_pos, corner, color, 3, 8, 0);
    }

  }
  GST_OBJECT_UNLOCK (filter);

  if (m) {
    gst_element_post_message (GST_ELEMENT (filter), m);
  }
  return gst_pad_push (filter->srcpad, buf);
}
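gst_template_match_match() is implemented elsewhere in the plugin; the following is a minimal sketch of the OpenCV calls it presumably wraps (an assumption based on the buffers passed in, not the plugin source). For the SQDIFF methods the best match is the minimum of the distance image; for the correlation methods it is the maximum.

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

static void
template_match_sketch (IplImage * img, IplImage * templ, IplImage * dist,
    double * best_res, CvPoint * best_pos, int method)
{
  double min_val, max_val;
  CvPoint min_loc, max_loc;

  /* dist must be IPL_DEPTH_32F with size (W-w+1, H-h+1), as created above */
  cvMatchTemplate (img, templ, dist, method);
  cvMinMaxLoc (dist, &min_val, &max_val, &min_loc, &max_loc, NULL);

  if (method == CV_TM_SQDIFF || method == CV_TM_SQDIFF_NORMED) {
    *best_res = min_val;        /* smaller difference = better match */
    *best_pos = min_loc;
  } else {
    *best_res = max_val;        /* larger correlation = better match */
    *best_pos = max_loc;
  }
}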
Example #20
File: dlbk.cpp Project: elfmedy/2013
//==============================================================================
//Binary image median filtering with a (2ts+1)^2 mask
void Cdlbk::Connect_Filter(int mWid, int mHei, unsigned char* pInput)
{	
	
	if (FT)
	{
		float flg_f=float(0.3);
		int ts = (int)(sqrt((float)FILTER_SIZE));
		unsigned int mask=0;
		unsigned int tem_sum=0;
		int i,j,m,n,x1,x2,y1,y2;
		unsigned char* ptem = new unsigned char[mWid*mHei];
		memset(ptem,0,sizeof(unsigned char)*mWid*mHei);
		unsigned char* pslipmast = new unsigned char[2*ts+1];
		memset(pslipmast,0,sizeof(unsigned char)*(2*ts+1));

		for( i=0; i<mHei; i+=1)
		{
			for( j=0; j<mWid; j+=1)
			{
				x1 = ( (j-ts) < 0    ) ? 0    : (j-ts);
				x2 = ( (j+ts) > mWid ) ? mWid : (j+ts);
				y1 = ( (i-ts) < 0    ) ? 0    : (i-ts);
				y2 = ( (i+ts) > mHei ) ? mHei : (i+ts);
				mask = (x2-x1+1)*(y2-y1+1);
				tem_sum=0;

				//for (n=y1; n<=y2; n++)
				//for (m=x1; m<=x2; m++)
				//	tem_sum+=pInput[n*mWid+m]/255;					
				//
				//float rst = (float)tem_sum/mask;
				//if(rst>flg_f)
				//ptem[i*mWid+j]=255;	
				
				if( mask!=(ts+ts+1)*(ts+ts+1) )
				{
					for (n=y1; n<=y2; n++)
					for (m=x1; m<=x2; m++)
						tem_sum+=pInput[n*mWid+m]/255;					

					float rst = (float)tem_sum/mask;
					if(rst>flg_f)
					ptem[i*mWid+j]=255;	
				}
				else
				{
					if(x1==0)	// start of a row: (re)build all column sums
					{
						for (m=x1; m<=x2; m++)
						{
							pslipmast[m] = 0;						
							for (n=y1; n<=y2; n++)					// compute each column sum of the window
								pslipmast[m]+=pInput[n*mWid+m]/255;					
						}
						
						tem_sum = 0;								// compute the result from the column sums
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}
					else
					{
						for (int q=0; q<x2-x1; q++)					// shift the sliding buffer left by one column
							pslipmast[q] = pslipmast[q+1];

						m = x2;										// compute the newly entered rightmost column
						pslipmast[x2-x1]=0;
						for (n=y1; n<=y2; n++)
							pslipmast[x2-x1] += pInput[n*mWid+m]/255;					
						
						tem_sum = 0;								// compute the result from the column sums
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}			
				}						
			}
		}

		//update input data
		memcpy(pInput,ptem,sizeof(unsigned char)*mWid*mHei);
		delete [] ptem;
		delete [] pslipmast;
	}
	else
	{
		IplImage* pFore = cvCreateImageHeader(cvSize(mWid,mHei), 8, 1);
		cvSetData(pFore, pInput, mWid);	// row step = mWid bytes (8-bit, single channel)
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
		cvFindContours( pFore, storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
		for (seq = first_seq; seq; seq = seq->h_next)
		{
			CvContour* cnt = (CvContour*)seq;
			double area = cvContourArea( cnt, CV_WHOLE_SEQ );
			if (fabs(area) <= FILTER_SIZE)
			{
				prev_seq = seq->h_prev;
				if( prev_seq )
				{
					prev_seq->h_next = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = prev_seq;
				}
				else
				{
					first_seq = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = NULL;
				}
			}
		}
		cvZero(pFore);
		cvDrawContours(pFore, first_seq, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 10, -1);
		cvReleaseImageHeader(&pFore);
		cvReleaseMemStorage(&storage);
	}
}
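The inner branch above is a sliding-window optimization: instead of re-summing the full (2ts+1)^2 mask at every pixel, it keeps one running sum per column in pslipmast, shifts that buffer left as the window moves right, and recomputes only the newly entered column. A stripped-down sketch of the same idea for a single row (a hypothetical helper, not part of the original class):

// Sliding 1-D mean filter over a binary (0/255) row; each step updates
// the running sum in O(1) instead of re-summing the whole window.
static void sliding_row_mean(const unsigned char* in, unsigned char* out,
                             int w, int ts, float thresh)
{
	int sum = 0;
	int x;
	for (x = 0; x <= ts && x < w; x++)	// initial window [0, ts]
		sum += in[x] / 255;

	for (x = 0; x < w; x++) {
		int lo = (x - ts < 0)     ? 0     : x - ts;
		int hi = (x + ts > w - 1) ? w - 1 : x + ts;
		out[x] = ((float)sum / (hi - lo + 1) > thresh) ? 255 : 0;
		if (x - ts >= 0)
			sum -= in[lo] / 255;			// column leaves on the left
		if (x + ts + 1 < w)
			sum += in[x + ts + 1] / 255;	// column enters on the right
	}
}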
Example #21
int main( int argc, char** argv )
{

  IplImage* img;  			 		// input image object 
  IplImage* grayImg = NULL;  		// tmp image object
  IplImage* thresholdedImg = NULL;  // threshold output image object
  IplImage* dst;					// output connected components
	
  int windowSize = 3; // starting threshold value
  int constant = 0; // starting constant value
  CvCapture* capture = NULL; // capture object
	
  const char* windowName1 = "OPENCV: adaptive image thresholding"; // window name
  const char* windowName2 = "OPENCV: grayscale image"; // window name
  const char* windowName3 = "OPENCV: adaptive threshold image"; // window name

	
  bool keepProcessing = true;	// loop control flag
  char key;						// user input	
  int  EVENT_LOOP_DELAY = 40;	// delay for GUI window
                                // 40 ms equates to 1000ms/25fps = 40ms per frame	
	
	
	
  // if command line arguments are provided try to read image/video_name
  // otherwise default to capture from attached H/W camera 

    if( 
	  ( argc == 2 && (img = cvLoadImage( argv[1], 1)) != 0 ) ||
	  ( argc == 2 && (capture = cvCreateFileCapture( argv[1] )) != 0 ) || 
	  ( argc != 2 && (capture = cvCreateCameraCapture( CAMERA_INDEX )) != 0 )
	  )
    {
      // create window objects

      cvNamedWindow(windowName1, 0 );
      cvNamedWindow(windowName2, 0 );
      cvNamedWindow(windowName3, 0 );		

	  // add adjustable trackbar for threshold parameter
		
      cvCreateTrackbar("Neighbourhood (N)", windowName3, &windowSize, 255, NULL);		
	  cvCreateTrackbar("Constant (C)", windowName3, &constant, 50, NULL);
		
	  // if capture object in use (i.e. video/camera)
	  // get initial image from capture object
			
	  if (capture) {
	
		  // cvQueryFrame is just a combination of cvGrabFrame 
		  // and cvRetrieveFrame in one call.
			  
		  img = cvQueryFrame(capture);
		  if(!img){
			if (argc == 2){				  
				printf("End of video file reached\n");
			} else {
				printf("ERROR: cannot get next fram from camera\n");
			}
			exit(0);
		  }
			  
	  }
	  
	  // create output image
	  
	  thresholdedImg = cvCreateImage(cvSize(img->width,img->height), 
						   img->depth, 1);
	  thresholdedImg->origin = img->origin;
	  grayImg = cvCreateImage(cvSize(img->width,img->height), 
				img->depth, 1);
	  grayImg->origin = img->origin;

	  dst = cvCloneImage(img);
	  
	  // create a set of random labels
	  	  
	  CvRNG rng = cvRNG(-1);
	  CvMat* color_tab = cvCreateMat( 1, 255, CV_8UC3 );
            for(int i = 0; i < 255; i++ )
            {
                uchar* ptr = color_tab->data.ptr + i*3;
                ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
            }
	  CvMemStorage* storage = cvCreateMemStorage(0);
			
      CvSeq* contours = 0;
	  CvSeq* current_contour;
      int comp_count = 0;
	  
	  // start main loop	
		
	  while (keepProcessing) {
		
		  // if capture object in use (i.e. video/camera)
		  // get image from capture object
			
		  if (capture) {
	
			  // cvQueryFrame is just a combination of cvGrabFrame 
			  // and cvRetrieveFrame in one call.
			  
			  img = cvQueryFrame(capture);
			  if(!img){
				if (argc == 2){				  
					printf("End of video file reached\n");
				} else {
					printf("ERROR: cannot get next fram from camera\n");
				}
				exit(0);
			  }
			  
		  }	  
		  
		  // if input is not already grayscale, convert to grayscale
		
		  if (img->nChannels > 1){
			 cvCvtColor(img, grayImg, CV_BGR2GRAY);
	      } else {
			cvCopy(img, grayImg);	// copy instead of aliasing img, so releasing grayImg later is safe
		  }
	  
		  // display image in window

		  cvShowImage( windowName2, grayImg );
	
		  // ensure the neighbourhood size is always odd and >= 3

		  if ((windowSize > 3) && (windowSize % 2 == 0)) {
				windowSize++;
		  } else if (windowSize < 3) {
			  windowSize = 3;
		  }
		  
		  // threshold the image and display
		  
		  cvAdaptiveThreshold(grayImg, thresholdedImg, 255,
		  						CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 
		  						windowSize, constant);
		  cvShowImage( windowName3, thresholdedImg );
		  
		 // find the contours	
		  
		  cvFindContours( thresholdedImg, storage, 
		  		&contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		  
		  // if (contours) (cvSeqSort(contours, sort_contour, NULL));
		  
		  // draw the contours in the output image
			
		    cvZero(dst);
			current_contour = contours;
			comp_count = 0;
            for( ; current_contour != 0; current_contour = current_contour->h_next, comp_count++ )
            {
				uchar* ptr = color_tab->data.ptr + (comp_count % 255)*3;	// wrap around the 255-entry colour table
				CvScalar color = CV_RGB( ptr[0], ptr[1], ptr[2] );
                cvDrawContours( dst, current_contour, color,
                                color, -1, CV_FILLED, 8, cvPoint(0,0) );
            }
			if (contours != NULL){
				cvClearSeq(contours);
			}
			cvClearMemStorage(storage);	// reclaim contour memory before the next frame

		  
		  // display images in window

		  cvShowImage( windowName1, dst );
			
		  // start event processing loop (very important,in fact essential for GUI)
	      // 40 ms roughly equates to 1000ms/25fps = 40ms per frame
		  
		  key = cvWaitKey(EVENT_LOOP_DELAY);

		  if (key == 'x'){
			
	   		// if user presses "x" then exit
			
	   			printf("Keyboard exit requested : exiting now - bye!\n");	
	   			keepProcessing = false;
		  } 
	  }
      
      // destroy window objects
      // (reached only once the event loop ends and processing stops)

      cvDestroyAllWindows();

      // destroy image object (if it does not originate from a capture object)

      if (!capture){
		  cvReleaseImage( &img );
      }
	  
	  // destroy image objects

      cvReleaseImage( &grayImg );
	  cvReleaseImage( &thresholdedImg );
	  cvReleaseImage( &dst );

      // all OK : main returns 0

      return 0;
    }

    // not OK : main returns -1

    return -1;
}
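For reference, with CV_ADAPTIVE_THRESH_MEAN_C and CV_THRESH_BINARY as used above, cvAdaptiveThreshold compares each pixel against its local neighbourhood mean rather than a single global value (a summary of the documented behaviour, not the library source):

    dst(x, y) = 255   if src(x, y) > mean over the N x N neighbourhood of (x, y) - C
    dst(x, y) = 0     otherwise

which is why the trackbar-controlled neighbourhood size N has to be kept odd and at least 3 in the loop above.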
Example #22
void VisualOdometry::slotProcessImage(const QString& name, const QSize& imageSize, const QVector3D& position, const QQuaternion& orientation, const QByteArray imageData)
{
    mNumberOfFramesProcessed++;

    // first copy the current image into the previous image
    memcpy(mImage1->imageData, mImage2->imageData, mFrameSize.width * mFrameSize.height);

    // convert the YCbCr from the camera into the current gray image
    Camera::convertYCbCr422ToGray8((u_char*)imageData.constData(), (u_char*)mImage2->imageData, imageSize.width(), imageSize.height());

    // black out the top quarter of the image because of propeller noise
    for(int i=0; i<mFrameSize.width*(mFrameSize.height/4); i++) mImage2->imageData[i]=0;

    // On the first execution, mImage1 is still undefined.
    if(mNumberOfFramesProcessed == 1) return;

    // reset debug image to black
    cvSet(mImageDebug, cvScalar(0));

    char filename[50];
    sprintf(filename, "image1_%d.jpg", mNumberOfFramesProcessed);
//    cvSaveImage(filename, mImage1);

    sprintf(filename, "image2_%d.jpg", mNumberOfFramesProcessed);
//    cvSaveImage(filename, mImage2);

//    qDebug() << "numberOfFeatures before is" << mNumberOfFeatures;
    cvGoodFeaturesToTrack(mImage1, mImageEigen, mImageTemp, mFeaturesFrame1, &mNumberOfFeatures, .01, .01, NULL);
//                qDebug() << "numberOfFeatures after  is" << mNumberOfFeatures;


    /* Actually run Pyramidal Lucas Kanade Optical Flow!!
     * "frame1_1C" is the first frame with the known features.
     * "frame2_1C" is the second frame where we want to find the first frame's features.
     * "pyramid1" and "pyramid2" are workspace for the algorithm.
     * "frame1_features" are the features from the first frame.
     * "frame2_features" is the (outputted) locations of those features in the second frame.
     * "number_of_features" is the number of features in the frame1_features array.
     * "optical_flow_window" is the size of the window to use to avoid the aperture problem.
     * "5" is the maximum number of pyramids to use.  0 would be just one level.
     * "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
     * "optical_flow_feature_error" is as described above (error in the flow for this feature).
     * "mOpticalFlowTerminationCriteria" is as described above (how long the algorithm should look).
     * "0" means disable enhancements.  (For example, the second array isn't pre-initialized with guesses.)
     */
    cvCalcOpticalFlowPyrLK(
        mImage1,
        mImage2,
        mImagePyramid1,
        mImagePyramid2,
        mFeaturesFrame1,
        mFeaturesFrame2,
        mNumberOfFeatures,
        mOpticalFlowWindow,
        5,
        mOpticalFlowFoundFeature,
        mOpticalFlowFeatureError,
        mOpticalFlowTerminationCriteria,
        0);

    qint64 averageDirectionX = 0;
    qint64 averageDirectionY = 0;

    /* For fun (and debugging :)), let's draw the flow field. */
    for(int i = 0; i < mNumberOfFeatures; i++)
    {
        /* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
        if(mOpticalFlowFoundFeature[i] == 0) continue;

        CvPoint p1,p2;

        p1.x = (int) mFeaturesFrame1[i].x;
        p1.y = (int) mFeaturesFrame1[i].y;
        p2.x = (int) mFeaturesFrame2[i].x;
        p2.y = (int) mFeaturesFrame2[i].y;

        averageDirectionX += p1.x - p2.x;
        averageDirectionY += p1.y - p2.y;

//                        const float angle = atan2( (double) p1.y - p2.y, (double) p1.x - p2.x);
//                        const float hypotenuse = sqrt(SQUARE(p1.y - p2.y) + SQUARE(p1.x - p2.x));
        /* Here we lengthen the arrow by a factor of three. */
//                        p2.x = (int) (p1.x - 3 * hypotenuse * cos(angle));
//                        p2.y = (int) (p1.y - 3 * hypotenuse * sin(angle));

        /* Now we draw the main line of the arrow. */
        /* "mImage1" is the frame to draw on.
         * "p" is the point where the line begins.
         * "q" is the point where the line stops.
         * "CV_AA" means antialiased drawing.
         * "0" means no fractional bits in the center cooridinate or radius.
         */
        cvLine( mImage1, p1, p2, CV_RGB(255,255,255), 1, CV_AA, 0);

        /*
        // Now draw the tips of the arrow.  I do some scaling so that the tips look proportional to the main line of the arrow.
        p.x = (int) (q.x + 9 * cos(angle + pi / 4));
        p.y = (int) (q.y + 9 * sin(angle + pi / 4));
        cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        p.x = (int) (q.x + 9 * cos(angle - pi / 4));
        p.y = (int) (q.y + 9 * sin(angle - pi / 4));
        cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        */
    }

    CvPoint p1, p2;
    p1.x = mFrameSize.width/2;
    p1.y = mFrameSize.height/2;

    p2.x = p1.x + (averageDirectionX/10) + mLastAverageHorizontalSpeed;
    p2.y = p1.y;// + (averageDirectionY/5);

    mLastAverageHorizontalSpeed = averageDirectionX/12;

    QTextStream out(mLogFile);
    out << "frame " << mNumberOfFramesProcessed << " " << averageDirectionX << " " << averageDirectionY << '\n';

    cvLine( mImage1, p1, p2, CV_RGB(255,255,255), 1, CV_AA, 0 );
    sprintf(filename, "frame_%d.jpg", mNumberOfFramesProcessed);
//                cvSaveImage(filename, mImage1);
    cv::imwrite(std::string(filename), cv::cvarrToMat(mImage1));	// wrap the IplImage in a cv::Mat header for imwrite


    qDebug() << "AVG DIRECTION" << averageDirectionX << averageDirectionY;
}
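None of the mImage*/mImagePyramid*/mFeatures* members are allocated in this snippet; a minimal sketch of the buffers this callback assumes (the 640x480 size and the 400-feature capacity are assumptions):

	CvSize frameSize = cvSize(640, 480);                              // assumed camera resolution
	IplImage* image1    = cvCreateImage(frameSize, IPL_DEPTH_8U, 1);  // previous gray frame (mImage1)
	IplImage* image2    = cvCreateImage(frameSize, IPL_DEPTH_8U, 1);  // current gray frame (mImage2)
	IplImage* pyramid1  = cvCreateImage(frameSize, IPL_DEPTH_8U, 1);  // LK workspace (mImagePyramid1)
	IplImage* pyramid2  = cvCreateImage(frameSize, IPL_DEPTH_8U, 1);  // LK workspace (mImagePyramid2)
	IplImage* eig       = cvCreateImage(frameSize, IPL_DEPTH_32F, 1); // cvGoodFeaturesToTrack workspace (mImageEigen)
	IplImage* temp      = cvCreateImage(frameSize, IPL_DEPTH_32F, 1); // cvGoodFeaturesToTrack workspace (mImageTemp)
	CvPoint2D32f features1[400], features2[400];    // mFeaturesFrame1 / mFeaturesFrame2
	char  foundFeature[400];                        // mOpticalFlowFoundFeature
	float featureError[400];                        // mOpticalFlowFeatureError
	int   numberOfFeatures = 400;                   // in: capacity, out: tracked count
	CvSize flowWindow = cvSize(5, 5);               // mOpticalFlowWindow
	CvTermCriteria crit = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3);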
Example #23
File: main.cpp Project: kckemper/kemper
int main( int argc, char** argv ) {
	
	int		cmd;
	float	desired[8][8]	= { {1,0,0,0,0,0,0,0},	// 0
								{0,1,0,0,0,0,0,0},	// 1
								{0,0,1,0,0,0,0,0},	// 2
								{0,0,0,1,1,0,0,0},	// 3 forward
								{0,0,0,1,1,0,0,0},	// 4 forward
								{0,0,0,0,0,1,0,0},	// 5
								{0,0,0,0,0,0,1,0},	// 6
								{0,0,0,0,0,0,0,1}};	// 7
						 
	CvPoint		p,q;
	CvScalar	line_color		= CV_RGB(0,0,255);
	CvScalar	out_color;

	const char* name_orig		= "Original: press q to save images";
	const char* name_ave		= "input";
	const char* name_weights	= "weights";
	
	const char*	inputCmdFile_name	= "./inputs/dataset1/commandlist.dat";
	const char*	outputFile_name		= "./outputs/output_";
	FILE*		outputFile;
	FILE*		inputCmdFile;
	char		inputName[64];
	char		outputName[64];

	
	CvCapture* capture = cvCreateCameraCapture(0) ;
		
	CvSize frame_size;
	CvScalar ave = cvScalar(1);
	
	
	CvRect  slice_rect;
	CvSize	slice_size;

	static	IplImage*	frame				= NULL;
	static	IplImage*	frame_g				= NULL;
	static	IplImage*	frame_small			= NULL;
	static	IplImage*	frame_weights		= NULL;
	static	IplImage*	frame_w_big			= NULL;
	static	IplImage*	frame_w_final		= NULL;
	static	IplImage*	frame_final			= NULL;	
	
	static	IplImage*	ave_image			= NULL;
//	static	IplImage *scale					= NULL;
	
	static	IplImage*	frame_slices[N_SLICES];



	float	inputs[(SIZE/N_SLICES)*SIZE];
	float	outputs[N_SLICES];
	int		choices[N_SLICES];
//	float	desired[N_SLICES];
//	float	desired[] = {0,0,0,1,1,0,0,0};										//XXX dummy test...	

	//Evo (int nNets, int nInputs, int nHidden, int nOuts)
	Evo*	evoSlice;

	int		ep;
	int		trial;
	int		stepCnt;

	int		flag = 0;

	char	c;
	int		i,j,k,s;
	float	tmp;

////////////////////////////////////////////////////////////////////////////////
// init stuff

	
	inputCmdFile	= fopen(inputCmdFile_name,"r");
	if (inputCmdFile == NULL) {printf("Unable to open: %s",inputCmdFile_name); return 0; }
	
	// create windows for looking at stuff
//	cvNamedWindow( name_slice,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_weights,	CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_ave,		CV_WINDOW_AUTOSIZE );
	cvNamedWindow( name_orig,		CV_WINDOW_AUTOSIZE );
	

//	frame_size	= cvSize(frame->width,frame->height);
	frame_size	= cvSize(SIZE,SIZE);

#ifdef USECAM
	// capture a frame so we can get an idea of the size of the source
	frame = cvQueryFrame( capture );
	if( !frame ) return 0;
#else
	sprintf(inputName,"./inputs/dataset1/image0000000000.jpg");
	frame = cvLoadImage(inputName, 0 );
	if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
#endif


	allocateOnDemand( &frame_g,			cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_w_big,		cvSize(frame->width,frame->height), IPL_DEPTH_8U, 1 );	
	allocateOnDemand( &frame_w_final,	cvSize(frame->width,frame->height), IPL_DEPTH_8U, 3 );	
	allocateOnDemand( &frame_final,		cvSize(frame->width,frame->height+20), IPL_DEPTH_8U, 3 );	
	
	
	allocateOnDemand( &ave_image,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_small,		frame_size, IPL_DEPTH_8U, 1 );
	allocateOnDemand( &frame_weights,	frame_size, IPL_DEPTH_8U, 1 );


	slice_size = cvSize(ave_image->width/N_SLICES, ave_image->height);


	for (i=0;i<N_SLICES;i++) {
		allocateOnDemand( &frame_slices[i], slice_size, IPL_DEPTH_8U, 1);
	}


	for(trial=0;trial<N_TRIALS;trial++) {


		sprintf(outputName,"%s%d.txt", outputFile_name, trial);
		outputFile		= fopen(outputName,"w");


		// initialize each learner
		// (Evo is a C++ class, so construct each element in the malloc'd
		// storage with placement new; requires <new>)
		evoSlice = (Evo*)malloc(sizeof(Evo)*N_SLICES);
		for(i=0;i<N_SLICES;i++) {
			new (&evoSlice[i]) Evo(N_NETS, (SIZE/N_SLICES)*SIZE, N_HIDDEN, 1);
			evoSlice[i].choose();
			choices[i] = evoSlice[i].choose();
		}

		ep		= 0;
		stepCnt = 0;
		flag	= 0;


		while(1) {

	////////////////////////////////////////////////////////////////////////////////
	// Pre processing		

#if 0
			// make blank image...
			cvSet(ave_image, cvScalar(0));

	
			for (i=0;i<NF;i++) {
	
				// get image
#ifdef USECAM
				frame = cvQueryFrame( capture );
				if( !frame ) break;
#else				
				sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
				frame = cvLoadImage(inputName, 0 );
				if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); return 0;}
				stepCnt++;
#endif			
				// convert it to grey
				cvConvertImage(frame, frame_g );//, CV_CVTIMG_FLIP);


				// resize
				cvResize(frame_g, frame_small);

				
				// take difference
				cvSub(frame_small, ave_image, ave_image);

			}

			for(j=0;j<SIZE;j++) {
				for(k=0;k<SIZE;k++) {
					PIX(ave_image,k,j) = (char)(PIX(ave_image,k,j)*10);
				}
			}

#endif

#if 0
			frame = cvQueryFrame( capture );
			if( !frame ) break;
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
#endif
		
#if 1
			sprintf(inputName,"./inputs/dataset1/image%010d.jpg",stepCnt);
			frame = cvLoadImage(inputName, 0 );
			if( !frame ){ printf("ERROR OPENING: %s!!!\n",inputName); break;}
			cvConvertImage(frame, frame_g );
			cvResize(frame_g, frame_small);
			cvConvertImage(frame_small, ave_image );
			
//			cvCanny(ave_image, ave_image, 50, 40,5);
			
#endif
		
		
	//		cvDilate(ave_image, ave_image,NULL,4);


	////////////////////////////////////////////////////////////////////////////////
	// Generate NN inputs


			// slice it up
			for (i=0;i<N_SLICES;i++) {
	
				slice_rect = cvRect(i*ave_image->width/N_SLICES, 0, ave_image->width/N_SLICES, ave_image->height);

				cvSetImageROI(ave_image, slice_rect);
				
				cvCopy(ave_image, frame_slices[i], NULL);

			}

			cvResetImageROI(ave_image);  // remove this when we don't care about looking at the ave

	////////////////////////////////////////////////////////////////////////////////
	// Evaluate NN
			if (stepCnt == N_LEARN)
				flag = 1;
	
			if( (flag == 1) && (stepCnt%N_LEARN == 0)) {	// every N_LEARN images switch
		
				ep++;
				fprintf(outputFile,"%d",ep);

				for(i=0;i<N_SLICES;i++) {
					evoSlice[i].replace();
					choices[i] = evoSlice[i].choose();
				
					fprintf(outputFile,"\t%1.3f",evoSlice[i].netPool[evoSlice[i].best()].grade);
				
				}
			
				fprintf(outputFile,"\n");
			
				if(ep >= N_EPISODES) break;
			
				// draw weights image
				for(s=0;s<N_SLICES;s++) {
		
					for(j=0;j<SIZE;j++) {
						for(k=0;k<(SIZE/N_SLICES);k++) {
				
							tmp = 0;
							for(i=0;i<N_HIDDEN;i++) {
								tmp += evoSlice[s].mutant->nodeHidden->w[(j*(SIZE/N_SLICES))+k+1];
							}
					
							PIX(frame_weights,k+(s*SIZE/N_SLICES),j) = (char)((tmp/N_HIDDEN)*255 + 127);
		//					printf("%d\t",(char)((tmp/N_HIDDEN)*255));
						}
					}
				}

				cvResize(frame_weights, frame_w_big, CV_INTER_LINEAR);
				cvConvertImage(frame_w_big, frame_w_final);
			
			}


			fscanf(inputCmdFile,"%d",&cmd);

			printf("\nTrial: %d   Episode: %d   Devin's cmd: %d\n",trial,ep,cmd);
			for(i=0;i<N_SLICES;i++)
				printf("%1.3f\t",desired[cmd][i]);
			printf("\n");
		

			for(i=0;i<N_SLICES;i++) {
	//			cvShowImage( name_slice, frame_slices[i] );

				// strip pixel data into a single array
				for(j=0;j<SIZE;j++) {
					for(k=0;k<(SIZE/N_SLICES);k++) {
						inputs[(j*(SIZE/N_SLICES))+k]	= (float)PIX(frame_slices[i],k,j)/255.0;
					}
				}


	//			printf("\n%d: Eval slice %d\n",stepCnt,i);
				outputs[i] = evoSlice[i].eval(inputs, &desired[cmd][i]);
	//			outputs[i] = desired[i];
				printf("%1.3f\t",outputs[i]);

			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%d\t",choices[i]);
			}
			printf("\n");

			for(i=0;i<N_SLICES;i++) {
				printf("%1.3f\t",evoSlice[i].mutant->grade);
			}
			printf("\n");


		
	////////////////////////////////////////////////////////////////////////////////
	// GUI stuff

		
		
		
			// copy input image into larger final image
			cvSetImageROI(frame_final, cvRect(0, 0, frame_w_big->width, frame_w_big->height));
			cvConvertImage(frame, frame_final);
			cvResetImageROI(frame_final);

			// draw slice markers
			for(i=1;i<N_SLICES;i++) {
				// on the final frame...
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_final->height;
				cvLine( frame_final, p, q, line_color, 2, CV_AA, 0 );

				// on the weights
				p.x = (int)(i*frame_w_final->width/N_SLICES);
				p.y = 0;
				q.x = p.x;
				q.y = (int)frame_w_final->height;
				cvLine( frame_w_final, p, q, line_color, 2, CV_AA, 0 );
			}

			// draw output indicators
			for(i=0;i<N_SLICES;i++) {
				out_color = CV_RGB(outputs[i]*255,0,0);
				p.x = (int)(i*frame_final->width/N_SLICES);
				p.y = (int)(frame_final->height-20);
				q.x = (int)(p.x+frame_final->width/N_SLICES);
				q.y = (int)(p.y+20);
				cvRectangle( frame_final, p, q, out_color, CV_FILLED, CV_AA, 0 );
			}
		
		
			cvShowImage( name_ave,		ave_image );
			cvShowImage( name_orig,		frame_final );
			cvShowImage( name_weights,	frame_w_final );
		
			c = cvWaitKey(2);
		
			if( c == 27 ) break;
			else if( c == 'q') {
				cvSaveImage("weights.jpg",frame_w_final);
				cvSaveImage("output.jpg",frame_final);
			}

			stepCnt++;
			if (stepCnt>=(N_STEPS-(N_STEPS%N_LEARN))) {
				stepCnt=0;
				rewind(inputCmdFile);
			}


		} // end while

		for(i=0;i<N_SLICES;i++) evoSlice[i].~Evo();	// run destructors to match the placement new
		free(evoSlice);
		fclose(outputFile);
	} // end trial for

////////////////////////////////////////////////////////////////////////////////
// clean up
//	delete &evo;

	fclose(inputCmdFile);
	
	cvReleaseCapture(	&capture );
	cvDestroyWindow(	name_ave );
	cvDestroyWindow(	name_orig );
	cvDestroyWindow(	name_weights );

	return 0;
}
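allocateOnDemand() is called above but never defined in this dump; the usual helper (as popularized by the Stavens optical-flow tutorial, reproduced here as an assumption) looks like:

static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
	if ( *img != NULL ) return;		/* already allocated */

	*img = cvCreateImage( size, depth, channels );
	if ( *img == NULL )
	{
		fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
		exit(-1);
	}
}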
Example #24
 void MarkFace(cv::Mat &source, const cv::Rect &face)
 {
     CvPoint topLeft = cvPoint(face.x, face.y);
     CvPoint downRight = cvPoint(face.x + face.width-1, face.y + face.height-1);
     rectangle( source, topLeft, downRight, CV_RGB(0,0,255));  
 }
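A hypothetical usage sketch for MarkFace (the image path and cascade file name are assumptions):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img = cv::imread("face.jpg");                              // hypothetical input image
    cv::CascadeClassifier cascade("haarcascade_frontalface_alt.xml");  // hypothetical cascade path
    if (img.empty() || cascade.empty()) return -1;

    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(img, faces);      // detect all faces
    for (size_t i = 0; i < faces.size(); i++)
        MarkFace(img, faces[i]);               // draw a blue box around each detection
    cv::imwrite("face_marked.jpg", img);
    return 0;
}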
Example #25
int main( int argc, char **argv ){ 
	int key;							//	variable for key input
	CvCapture *capture = NULL;			//	structure for camera capture
	IplImage *frameImage;				//	IplImage for the captured frame
	IplImage *frameImage2;				//	second IplImage for the captured frame

	//	create the working images
	IplImage *backgroundImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the background image
	IplImage *grayImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// IplImage for the grayscale image
	IplImage *differenceImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the difference image

	IplImage *hsvImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );				// IplImage for the HSV image
	IplImage *hueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// IplImage for the hue (H) channel
	IplImage *saturationImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the saturation (S) channel
	IplImage *valueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// IplImage for the value (V) channel
	IplImage *thresholdImage1 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the region where value exceeds THRES_BOTTOM
//	IplImage *thresholdImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the region where value is at most THRES_TOP
//	IplImage *thresholdImage3 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		// IplImage for the AND of thresholdImage1 and thresholdImage2
	IplImage *lightImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				// IplImage for the extracted bright region
	
	char windowNameCapture[] = "Capture"; 			// name of the window showing the captured image
	char windowNameLight[] = "Light";				// name of the window showing the bright region
	char windowNameCapture2[] = "Capture2"; 		// name of the window showing the drawing canvas
	char windowNameThreshold[] = "Threshold";		// name of the window showing thresholdImage1

	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;
	unsigned char h,s,v,r,g,b;
	int rr=0,gg=0,bb=0;	// default circle colour if no channel exceeds the threshold
	int soundc=0;


	//	initialize the camera
	if ( ( capture = cvCreateCameraCapture( 0 ) ) == NULL ) {
		//	no camera was found
		printf( "Camera not found\n" );
		return -1;
	}

	//	create the windows
	cvNamedWindow( windowNameCapture, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameLight, CV_WINDOW_AUTOSIZE );
  	cvNamedWindow( windowNameCapture2, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameThreshold, CV_WINDOW_AUTOSIZE );

  	//	grab one frame from the camera to set the initial background
	frameImage = cvQueryFrame( capture );
	//	convert frameImage to grayscale and use it as the background image
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	frameImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );
	cvSet (frameImage2, CV_RGB(0,0,0));  // fill with black

	//	main loop
	while( 1 ) {
		//	store the next input frame from capture in frameImage
		frameImage = cvQueryFrame( capture );
		//	convert frameImage to grayscale into grayImage
		cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
		//	take the difference between grayImage and the background image
		cvAbsDiff( grayImage, backgroundImage, differenceImage );
		
		//	convert frameImage from BGR to HSV
		cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
		//	split the HSV image into H, S and V channel images
		cvSplit( hsvImage, hueImage, saturationImage, valueImage, NULL );
		//	extract the bright areas by thresholding the value channel
		cvThreshold( valueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
//		cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
//		cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, NULL );
		
		//	AND the background difference with the bright region
		cvAnd( differenceImage, thresholdImage1, lightImage, NULL );

		for(gravityY=0;gravityY<HEIGHT;gravityY++){
			for(gravityX=0;gravityX<WIDTH;gravityX++){
//			h = hsvImage ->imageData[hsvImage ->widthStep * (gravityY) + gravityX * 3 ];     // H
//			s = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 1];    // S
			v = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 2];    // V
			
			b = frameImage ->imageData[frameImage ->widthStep * (gravityY) + gravityX * 3 ];     // B
			g = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 1];    // G
			r = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 2];    // R

			if (r>30){
				rr=250;
				gg=0;
				bb=0;
			}
			if (g>30){
				rr=0;
				gg=250;
				bb=0;
			}
			if (b>30){
				rr=0;
				gg=0;
				bb=250;
			}
			if ((b>30) && (r>30)){
				rr=250;
				gg=0;
				bb=250;
			}
			if ((b>30) && (g>30)){
				rr=0;
				gg=250;
				bb=250;
			}
			if ((b>30) && (g>30) && (r>30)){
				rr=250;
				gg=250;
				bb=250;
			}

		//	draw a circle on the canvas image
			if (v>80){
			cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS, CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 );
			soundc++;
				if(soundc %30==0){
					// maybe play a sound
					PlaySound("notify.wav" , NULL , SND_FILENAME | SND_ASYNC);
					soundc=0;
			}
			}
			}
		}
	
		
		//	compute the centroid of the bright region
/*		cvMoments( lightImage, &moment, 0 );
		m_00 = cvGetSpatialMoment( &moment, 0, 0 );
		m_10 = cvGetSpatialMoment( &moment, 1, 0 );
		m_01 = cvGetSpatialMoment( &moment, 0, 1 );
		gravityX = m_10 / m_00;
		gravityY = m_01 / m_00;

		if (0<gravityX){
			h = hsvImage ->imageData[hsvImage ->widthStep * (gravityY) + gravityX * 3 ];     // H
			s = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 1];    // S
			v = hsvImage ->imageData[hsvImage ->widthStep * gravityY + gravityX * 3 + 2];    // V

			b = frameImage ->imageData[frameImage ->widthStep * (gravityY) + gravityX * 3 ];     // B
			g = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 1];    // G
			r = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 2];    // R

			printf ("x= %d ,y= %d v= %d,s= %d,h= %d  \n" ,gravityX,gravityY,r,g,b);
			
			rr=250;
			gg=250;
			bb=250;

			if (r>200){
				rr=150;
				gg=0;
				bb=0;
			}
			if (g>200){
				rr=0;
				gg=150;
				bb=0;
			}
			if (b>200){
				rr=0;
				gg=0;
				bb=150;
			}

		//	draw a circle on the image
			if (v>200){
				
				cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS, CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 );

			}
		}
*/
		//	display the images
		cvShowImage( windowNameCapture, frameImage );
		cvShowImage( windowNameLight, lightImage );
		cvShowImage( windowNameCapture2,   frameImage2);
		cvShowImage( windowNameThreshold, thresholdImage1);

		//	handle key input
		key = cvWaitKey( 10 );
		if( key == 'q' ) 
			//	exit the loop when 'q' is pressed
			break;
		else if( key == 'b' ) {
			//	when 'b' is pressed, use the current frame as the new background image
			frameImage = cvQueryFrame( capture );
		    cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
		}
		else if(key == 'c') {
			//	when 'c' is pressed, clear the drawing canvas
			cvSet (frameImage2, CV_RGB(0,0,0));  // fill with black
		}
	}
	//	release the capture
	cvReleaseCapture( &capture );
	//	release the images
	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
//	cvReleaseImage( &thresholdImage2 );
//	cvReleaseImage( &thresholdImage3 );
	cvReleaseImage( &lightImage );
	//	destroy the windows
	cvDestroyWindow( windowNameCapture );
	cvDestroyWindow( windowNameLight );
	cvDestroyWindow( windowNameThreshold );
	cvDestroyWindow( windowNameCapture2 );

	return 0;
} 
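The per-channel reads above index imageData with widthStep arithmetic by hand; the legacy CV_IMAGE_ELEM macro expresses the same access more readably (a small sketch, assuming an 8-bit image):

static unsigned char pixelChannel(IplImage* img, int y, int x, int c)
{
	/* equivalent to img->imageData[img->widthStep*y + x*img->nChannels + c] */
	return CV_IMAGE_ELEM(img, unsigned char, y, x * img->nChannels + c);
}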
Example #26
// configuration parameters
#define NUM_COMNMAND_LINE_ARGUMENTS 2
#define CAMERA_FRAME_WIDTH 640
#define CAMERA_FRAME_HEIGHT 360
#define CAMERA_FORMAT CV_8UC1
#define CAMERA_FPS 30
#define CAMERA_BRIGHTNESS 128
#define CAMERA_CONTRAST 10
#define CAMERA_SATURATION 0
#define CAMERA_HUE 0
#define CAMERA_GAIN 0
#define CAMERA_EXPOSURE -6
#define CAMERA_CONVERT_RGB false

// color constants
CvScalar COLOR_WHITE = CV_RGB(255, 255, 255);
CvScalar COLOR_RED = CV_RGB(255, 0, 0);
CvScalar COLOR_GREEN = CV_RGB(0, 255, 0);
CvScalar COLOR_BLUE = CV_RGB(0, 0, 255);
CvScalar COLOR_YELLOW = CV_RGB(255, 255, 0);
CvScalar COLOR_MAGENTA = CV_RGB(255, 0, 255);

/*******************************************************************************************************************//**
 * @brief Program entry point
 *
 * Handles image processing and display of annotated results
 *
 * @param[in] argc command line argument count
 * @param[in] argv command line argument vector
 * @return return status
 * @author Christopher D. McMurrough
 **********************************************************************************************************************/
unsigned long Test()
{
	bool visualize = true;
	std::string RangeWindowName = "Range";
	std::string HoughWindowName = "Hough";

	IplImage* visualizationReferenceImage = cvLoadImage("Pictures/building.jpg");
	CvMemStorage* storage = cvCreateMemStorage(0);	/// Line endings storage
	CvSeq* lines = 0;
	int AngleBins = 45;	/// Controls the angle resolution of the Hough transform (bins per pi)
	IplImage* Image = cvCreateImage(cvGetSize(visualizationReferenceImage), IPL_DEPTH_8U, 3);	/// Visualization image
	cvCopyImage(visualizationReferenceImage,Image);
	IplImage* GrayImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);
	cvCvtColor(Image, GrayImage, CV_RGB2GRAY);
	IplImage* CannyImage = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 1);	/// Edge image
	cvCanny(GrayImage, CannyImage, 25, 50);
	CvPoint ROIp1 = cvPoint(100,10);		/// Tablet ROI
	CvPoint ROIp2 = cvPoint(visualizationReferenceImage->width-40+ROIp1.x,visualizationReferenceImage->height-200+ROIp1.y);
	cvSetImageROI(CannyImage,cvRect(ROIp1.x,ROIp1.y,ROIp2.x-ROIp1.x,ROIp2.y-ROIp1.y));
	cvRectangle(Image, ROIp1, ROIp2, CV_RGB(0,255,0));
	//int maxd = cvRound(sqrt(sqrt((double)ROIp2.x-ROIp1.x)+sqrt((double)ROIp2.y-ROIp1.y)));	/// Maximum of possible distance value in Hough space
	int maxd = cvRound(sqrt((double)(((ROIp2.x-ROIp1.x)*(ROIp2.x-ROIp1.x))+((ROIp2.y-ROIp1.y)*(ROIp2.y-ROIp1.y)))));	/// Maximum of possible distance value in Hough space
	IplImage* HoughSpace = cvCreateImage(cvSize(maxd,AngleBins+1),IPL_DEPTH_8U, 1);		/// Hough space image (black=no line, white=there are lines at these bins)
	cvZero(HoughSpace);
	
	/// Hough transformation
	int AccumulatorThreshold = 100;		/// A line is returned only if its accumulator value is greater than this threshold.
	double MinimumLineLength = 50;		/// For the probabilistic Hough transform, the minimum line length.
	double MaximumGap = 4;				/// For the probabilistic Hough transform, the maximum gap between collinear segments for them to be joined into a single line segment.
	lines = cvHoughLines2(CannyImage, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/AngleBins, AccumulatorThreshold, MinimumLineLength, MaximumGap);
	
	for(int i = 0; i < lines->total; i++ )
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		/// Endings of a line
		CvPoint p0 = cvPoint(line[0].x+ROIp1.x,line[0].y+ROIp1.y);
		CvPoint p1 = cvPoint(line[1].x+ROIp1.x,line[1].y+ROIp1.y);
		cvLine(Image, p0, p1, CV_RGB(255,0,0), 3, 8 );
		
		/// Slope/angle of line
		double phi = CV_PI/2;
		if(p0.x != p1.x) phi = atan((double)(p1.y-p0.y)/(double)(p1.x-p0.x));
		phi += (phi < 0)*CV_PI;

		/// Hessian normal form parameters: d = x*cos(alpha) + y*sin(alpha)
		/// with alpha in [0...pi], d in [0...maxd]
		double alpha = phi+CV_PI/2;
		alpha -= (alpha > CV_PI)*CV_PI;

		double d = p0.x;
		if(p0.x != p1.x)
		{
			double n = p1.y - (double)(p1.y-p0.y)/(double)(p1.x-p0.x) * p1.x;	// y-intercept (avoid integer division)
			d = fabs(n * cos(phi));
		}
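		/* Why d = |n*cos(phi)|: a non-vertical line y = tan(phi)*x + n lies at
		   distance |n|/sqrt(1+tan^2(phi)) = |n*cos(phi)| from the origin, which
		   is exactly the d of the Hessian normal form quoted above. */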

		/// Write Line into Hough space
		cvLine(HoughSpace, cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),cvPoint(cvRound(d),cvRound(alpha/CV_PI*AngleBins)),CV_RGB(255,255,255));
	}
	if(visualize)
	{
		cvNamedWindow(RangeWindowName.c_str());
		cvNamedWindow(HoughWindowName.c_str());
		cvShowImage(RangeWindowName.c_str(), Image);
		cvShowImage(HoughWindowName.c_str(), HoughSpace);
		cvWaitKey(0);
	}
	cvCopyImage(Image,visualizationReferenceImage);

	cvReleaseImage(&GrayImage);
	cvReleaseImage(&CannyImage);

	/*
	IplImage* img1 = cvLoadImage("Cob3.jpg");
	IplImage* img2 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img3 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	IplImage* img4 = cvCreateImage(cvGetSize(img1),img1->depth,1);
	cvNamedWindow("Img1");
	cvNamedWindow("Img2");
	cvNamedWindow("Img3");
	cvCvtColor(img1, img2, CV_RGB2GRAY);
	cvCanny(img2, img3, 100, 200);
	cvShowImage("Img1", img1);
	cvShowImage("Img2", img2);
	cvShowImage("Img3", img3);
	cvWaitKey(0);
	cvReleaseImage(&img1);
	cvReleaseImage(&img2);
	cvReleaseImage(&img3);
	cvDestroyAllWindows();

	/*
	IplImage* Img1;
	IplImage* Img2;
	IplImage* Img3;

	cvNamedWindow("Img");
	while (cvGetWindowHandle("Img"))
	{
		if(cvWaitKey(10)=='q') break;

		/// Uncomment when using <code>GetColorImage</code> instead of <code>GetColorImage2</code>
	    //ColorImage = cvCreateImage(cvSize(1388,1038),IPL_DEPTH_8U,3);
		if (colorCamera->GetColorImage2(&Img1) == libCameraSensors::RET_FAILED)
		//if (colorCamera->GetColorImage(ColorImage, true) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		if (colorCamera->GetColorImage2(&Img2) == libCameraSensors::RET_FAILED)
		{
			std::cerr << "TestCameraSensors: Color image acquisition failed\n";
			getchar();
			return ipa_utils::RET_FAILED;
		}

		Img3 = cvCreateImage(cvGetSize(Img1),Img1->depth,Img1->nChannels);
		cvSub(Img1, Img2, Img3);
		cvShowImage("Img", Img3);

		cvReleaseImage(&Img1);
		cvReleaseImage(&Img2);
		cvReleaseImage(&Img3);
	}*/

	return ipa_utils::RET_OK;
}
Example #28
int _tmain(int argc, _TCHAR* argv[])
{
	TVAInitParams params;
	TVAHumanInit hparams;
	TVAHumanResult result;
	result.objects = new TVAHumanBlob[cNumObjects];
	result.num     = 0;
	if (!LoadInitParams("init.xml", &params))
	{
		printf("Cannot load params.\n");
		return 0;
	}

	hparams.maxHeight = 90;
	hparams.maxWidth  = 30;
	hparams.minHeight = 30;
	hparams.minWidth  = 10;
	hparams.numObects = cNumObjects;

	HANDLE hHuman = humanCreate(&params, &hparams);
	if (!hHuman)
		return 0;

	CvCapture* capture = NULL;

	if (argc < 2)
		capture = cvCaptureFromCAM(0);
	else
		capture = cvCaptureFromFile(argv[1]);

	if (capture == NULL)
	{
		printf("%s\n", "Cannot open camera.");
		return -1;
	}


	for (;;) 
	{
		IplImage* frame = NULL;
		frame = cvQueryFrame(capture);
		if (!frame)
			break;

		humanProcess(hHuman, frame->width, frame->height, frame->nChannels, (unsigned char*)frame->imageData, &result);
		if (result.num > 0)
		{

			for (int i = 0; i < result.num; i++)
			{
				
				CvScalar color;
				color = CV_RGB(200, 200, 200);

				CvPoint p1,p2;

				p1.x = result.objects[i].XPos;
				p1.y = result.objects[i].YPos;

				p2.x = p1.x + result.objects[i].Width;
				p2.y = p1.y + result.objects[i].Height;

				cvRectangle(frame, p1, p2, color);
			}
		}

		cvShowImage(_MODULE_, frame);

		int c;
		c = cvWaitKey(10);
		if ((char)c == 27)
			break;

	}
	humanRelease(&hHuman);
	delete [] result.objects;	// allocated with new[], so release with delete[]
	cvReleaseCapture(&capture);
	return 0;
}
Example #29
bool face_detector::get_face_parameters(Mat &matImg)
{
    bool nRetCode = false;
    vector<Rect> faces;

    IplImage *orig_img = cvCloneImage(input_img);
    matImg = cvarrToMat(orig_img).clone();	// deep copy so matImg stays valid after orig_img is released below
    cascadeFace.detectMultiScale(matImg, faces, 1.1, 2, 0, Size(40, 40));

    if(faces.empty() == false) {
        // take the largest detected face
        Rect largestFace;
        largestFace.width=0;
        largestFace.height=0;
        for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++) {
            if( (r->width * r->height) > (largestFace.width * largestFace.height) )
                largestFace = *r;
        }

        // assign the largest face region to area
        area.left  = largestFace.x;
        area.right  = largestFace.x + largestFace.width;
        area.top  = largestFace.y;
        area.bottom = largestFace.y + largestFace.height;
        /*********/
        //printf("left:%d, right: %d, top: %d, bottom: %d\n", area.left, area.right, area.top, area.bottom);
        /*********/
        // detect the eyes within the largest face
        Mat smallImgROI;
        Rect eyeArea = largestFace;
        eyeArea.height = eyeArea.height/1.2; // search only the upper part of the face for eyes, to reduce the error rate (tuning note: the top half alone sometimes misses)
        smallImgROI = matImg(eyeArea);
        Rect leftEyeRect(0,0,0,0), rightEyeRect(0,0,0,0);
        if(useSingleEyeClassifier) {
            // detect both eyes with a single eye-pair classifier
            vector<Rect> eyes;
            cascadeEye.detectMultiScale( smallImgROI, eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
            if(eyes.size()>=2) { // at least two eyes must be detected
                vector<Rect>::const_iterator nr = eyes.begin();
                leftEyeRect = *nr;
                nr++;
                rightEyeRect = *nr;
            } else {
                fprintf(stderr, "At least two eyes must be detected!\n");
            }
        } else {
            // detect the eyes with separate left-eye and right-eye classifiers
            vector<Rect> leftEye, rightEye;
            cascadeLeftEye.detectMultiScale( smallImgROI, leftEye, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(20, 20) );
            cascadeRightEye.detectMultiScale( smallImgROI, rightEye, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(20, 20) );
            if(leftEye.empty() == false)
                leftEyeRect = *(leftEye.begin());
            if(rightEye.empty() == false)
                rightEyeRect = *(rightEye.begin());
        }

        iris_point.xleft = cvRound(largestFace.x + leftEyeRect.x + leftEyeRect.width*0.5);  // x coordinate of the left eye center
        iris_point.yleft = cvRound(largestFace.y + leftEyeRect.y + leftEyeRect.height*0.5);  // y coordinate of the left eye center
        iris_point.xright = cvRound(largestFace.x + rightEyeRect.x + rightEyeRect.width*0.5);  // x coordinate of the right eye center
        iris_point.yright = cvRound(largestFace.y + rightEyeRect.y + rightEyeRect.height*0.5); // y coordinate of the right eye center
        nRetCode = true;
        // post-detection validation of the eyes:
        // the left eye must not be to the right of the right eye
//        if(iris_point.xleft >= iris_point.xright )
//            fprintf(stderr, "11111111111"), nRetCode = false;
        // no eye may sit on the image border (the coordinates start at 0, so this also fails when fewer than two eyes were detected)
        if( (iris_point.xleft==0) || (iris_point.yleft==0) ||(iris_point.xright==0) || (iris_point.yright==0) )
            fprintf(stderr, "22222222222222222\n"), nRetCode = false;
        // the two eyes must not be tilted too far vertically (also filters some false detections)
        if(abs(iris_point.yright-iris_point.yleft) > (largestFace.width/3) )
            fprintf(stderr, "3333333333333333\n"), nRetCode = false;
        // the horizontal eye distance must not be less than 1/4 of the face width (also filters some false detections)
        if(abs(iris_point.xright-iris_point.xleft) < (largestFace.width/4) )
            fprintf(stderr, "4444444444\n"), nRetCode = false;
        // the input gray_image is horizontally mirrored; this does not affect this function,
        // but so that the left eye becomes the right eye and vice versa, the two would need to be swapped at the end.
//        int tmpSwap;
//        tmpSwap = iris_point.xleft;
//        iris_point.xleft = iris_point.xright;
//        iris_point.xright = tmpSwap;
//        tmpSwap = iris_point.yleft;
//        iris_point.yleft = iris_point.yright;
//        iris_point.yright = tmpSwap;
        /******************/
        //printf("xleft: %d, xright: %d, yleft: %d, yright: %d\n", iris_point.xleft, iris_point.xright, iris_point.yleft, iris_point.yright);
        /******************/
        // draw the detected face and eye centers, for verification/debugging
        Point left_top, right_bottom;
        left_top.x = area.left;
        left_top.y = area.top;
        right_bottom.x = area.right;
        right_bottom.y = area.bottom;
        rectangle(matImg, left_top, right_bottom, CV_RGB(0,255,0), 2, 8, 0);
        left_top.x = iris_point.xleft;
        left_top.y = iris_point.yleft;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(matImg, left_top, right_bottom, CV_RGB(90, 255, 0), 2, 8, 0);
        left_top.x = iris_point.xright;
        left_top.y = iris_point.yright;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(matImg, left_top, right_bottom, CV_RGB(0, 255, 0), 2, 8, 0);

    }
    cvReleaseImage(&orig_img);
    return nRetCode;
}
Example #30
int main( int argc, char** argv )
{
    forceUSLocaleToKeepOurSanity();

    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help =
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
        printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( input_filename )
    {
        fprintf( stderr, "Trying to open %s \n" , input_filename );

        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            fprintf(stderr,"Warning , cvCreateFileCapture failed to open %s \n",input_filename);
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );

    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key;

        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
            }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view )
        {
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%05d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        if( blink )
            cvNot( view, view );

        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
        }

        cvShowImage( "Image View", view );
        key = cvWaitKey(capture ? 50 : 500);

        if( key == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistort_image = !undistort_image;

        if( capture && key == 'g' )
        {
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    if( storage )
        cvReleaseMemStorage( &storage );
    return 0;
}
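run_calibration() and save_camera_params() are defined elsewhere in this sample (it appears to be the classic OpenCV calibration.cpp demo). From the call sites above, their signatures are approximately the following; these prototypes are inferred from the arguments passed, not copied from the original file:

static int run_calibration( CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
    float square_size, float aspect_ratio, int flags, CvMat* camera_matrix,
    CvMat* dist_coeffs, CvMat** extr_params, CvMat** reproj_errs, double* avg_reproj_err );

static void save_camera_params( const char* out_filename, int image_count, CvSize img_size,
    CvSize board_size, float square_size, float aspect_ratio, int flags,
    const CvMat* camera_matrix, const CvMat* dist_coeffs, const CvMat* extr_params,
    CvSeq* image_points_seq, const CvMat* reproj_errs, double avg_reproj_err );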