Example 1
#include <iostream>

#include <opencv2/opencv.hpp>
#include <raspicam/raspicam_cv.h>

int main() {
	raspicam::RaspiCam_Cv Camera; // camera object

	// Set camera params
	Camera.set(CV_CAP_PROP_FORMAT, CV_8UC3); // For color
	Camera.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	Camera.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

	// Open camera
	std::cout << "Opening camera...\n";
	if (!Camera.open()) {
		std::cerr << "Error opening camera!\n";
		return -1;
	}


	// The two windows we'll be using
  cvNamedWindow("video");
	cvNamedWindow("thresh");

	cvMoveWindow("video", 0, 0);
	cvMoveWindow("thresh", 240, 0);

	int thresh_h[] {0, 18};
	int thresh_s[] {160, 255};
	int thresh_v[] {144, 255};
	const int max_thresh(255);

	cv::createTrackbar(" H min:", "thresh", &thresh_h[0], max_thresh, nullptr);
	cv::createTrackbar(" H max:", "thresh", &thresh_h[1], max_thresh, nullptr);
	cv::createTrackbar(" S min:", "thresh", &thresh_s[0], max_thresh, nullptr);
	cv::createTrackbar(" S max:", "thresh", &thresh_s[1], max_thresh, nullptr);
	cv::createTrackbar(" V min:", "thresh", &thresh_v[0], max_thresh, nullptr);
	cv::createTrackbar(" V max:", "thresh", &thresh_v[1], max_thresh, nullptr);

	// This image holds the "scribble" data, the tracked positions of the ball
	IplImage* imgScribble = NULL;

	cv::Mat frame_mat;

	while (true) {
		if (! Camera.grab()) {
			break;
		}
		Camera.retrieve(frame_mat);

		// Will hold a frame captured from the camera
		IplImage frame = frame_mat;

		// If this is the first frame, we need to initialize it
		if (imgScribble == NULL) {
			imgScribble = cvCreateImage(cvGetSize(&frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(&frame, thresh_h, thresh_s, thresh_v);

		// Calculate the moments to estimate the position of the ball
		CvMoments moments;
		cvMoments(imgYellowThresh, &moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(&moments, 1, 0);
		double moment01 = cvGetSpatialMoment(&moments, 0, 1);
		double area = cvGetCentralMoment(&moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10 / area;
		posY = moment01 / area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if it's a valid position
		if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0) {
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(&frame, imgScribble, &frame);
		cvCvtColor(&frame, &frame, CV_BGR2RGB);
		cvShowImage("video", &frame);
//	cvShowImage("video", imgScribble);
		cvShowImage("thresh", imgYellowThresh);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if (c != -1) {
			// If pressed, break out of the loop
			break;
		}
	}

	return 0;
}
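GetThresholdedImage is not defined in this snippet. A minimal sketch of what it plausibly does, assuming it converts the frame to HSV and applies the trackbar-controlled ranges with cvInRangeS (the helper's body is inferred, not taken from the original source):

IplImage* GetThresholdedImage(IplImage* img, int* h, int* s, int* v)
{
	// convert to HSV so the hue-based trackbar thresholds apply
	IplImage* imgHSV = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
	cvCvtColor(img, imgHSV, CV_BGR2HSV);

	// white where all three channels fall inside [min, max], black elsewhere
	IplImage* imgThresh = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
	cvInRangeS(imgHSV,
	           cvScalar(h[0], s[0], v[0], 0),
	           cvScalar(h[1], s[1], v[1], 0),
	           imgThresh);

	cvReleaseImage(&imgHSV);
	return imgThresh; // the caller is responsible for releasing this
}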
Example 2
/*
// Getting feature map for the selected subimage
//
// API
// int getFeatureMaps(const IplImage * image, const int k, CvLSVMFeatureMap **map);
// INPUT
// image             - selected subimage
// k                 - size of cells
// OUTPUT
// map               - feature map
// RESULT
// Error status
*/
int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
    int sizeX, sizeY;
    int p, px, stringSize;
    int height, width, numChannels;
    int i, j, kk, c, ii, jj, d;
    float  * datadx, * datady;

    int   ch;
    float magnitude, x, y, tx, ty;

    IplImage * dx, * dy;
    int *nearest;
    float *w, a_x, b_x;

    float kernel[3] = {-1.f, 0.f, 1.f};
    CvMat kernel_dx = cvMat(1, 3, CV_32F, kernel);
    CvMat kernel_dy = cvMat(3, 1, CV_32F, kernel);

    float * r;
    int   * alfa;

    float boundary_x[NUM_SECTOR + 1];
    float boundary_y[NUM_SECTOR + 1];
    float max, dotProd;
    int   maxi;

    height = image->height;
    width  = image->width ;

    numChannels = image->nChannels;

    dx    = cvCreateImage(cvSize(image->width, image->height),
                          IPL_DEPTH_32F, 3);
    dy    = cvCreateImage(cvSize(image->width, image->height),
                          IPL_DEPTH_32F, 3);

    sizeX = width  / k;
    sizeY = height / k;
    px    = 3 * NUM_SECTOR;
    p     = px;
    stringSize = sizeX * p;
    allocFeatureMapObject(map, sizeX, sizeY, p);

    cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
    cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));

    float arg_vector;
    for(i = 0; i <= NUM_SECTOR; i++)
    {
        arg_vector    = ( (float) i ) * ( (float)(PI) / (float)(NUM_SECTOR) );
        boundary_x[i] = cosf(arg_vector);
        boundary_y[i] = sinf(arg_vector);
    }/*for(i = 0; i <= NUM_SECTOR; i++) */

    r    = (float *)malloc( sizeof(float) * (width * height));
    alfa = (int   *)malloc( sizeof(int  ) * (width * height * 2));

    for(j = 1; j < height - 1; j++)
    {
        datadx = (float*)(dx->imageData + dx->widthStep * j);
        datady = (float*)(dy->imageData + dy->widthStep * j);
        for(i = 1; i < width - 1; i++)
        {
            c = 0;
            x = (datadx[i * numChannels + c]);
            y = (datady[i * numChannels + c]);

            r[j * width + i] = sqrtf(x * x + y * y);
            for(ch = 1; ch < numChannels; ch++)
            {
                tx = (datadx[i * numChannels + ch]);
                ty = (datady[i * numChannels + ch]);
                magnitude = sqrtf(tx * tx + ty * ty);
                if(magnitude > r[j * width + i])
                {
                    r[j * width + i] = magnitude;
                    c = ch;
                    x = tx;
                    y = ty;
                }
            }/*for(ch = 1; ch < numChannels; ch++)*/

            max  = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++)
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max)
                {
                    max  = dotProd;
                    maxi = kk;
                }
                else
                {
                    if (-dotProd > max)
                    {
                        max  = -dotProd;
                        maxi = kk + NUM_SECTOR;
                    }
                }
            }
            alfa[j * width * 2 + i * 2    ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;
        }/*for(i = 1; i < width - 1; i++)*/
    }/*for(j = 1; j < height - 1; j++)*/

    nearest = (int  *)malloc(sizeof(int  ) *  k);
    w       = (float*)malloc(sizeof(float) * (k * 2));

    for(i = 0; i < k / 2; i++)
    {
        nearest[i] = -1;
    }/*for(i = 0; i < k / 2; i++)*/
    for(i = k / 2; i < k; i++)
    {
        nearest[i] = 1;
    }/*for(i = k / 2; i < k; i++)*/

    for(j = 0; j < k / 2; j++)
    {
        b_x = k / 2 + j + 0.5f;
        a_x = k / 2 - j - 0.5f;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = 0; j < k / 2; j++)*/
    for(j = k / 2; j < k; j++)
    {
        a_x = j - k / 2 + 0.5f;
        b_x =-j + k / 2 - 0.5f + k;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = k / 2; j < k; j++)*/


    for(i = 0; i < sizeY; i++)
    {
      for(j = 0; j < sizeX; j++)
      {
        for(ii = 0; ii < k; ii++)
        {
          for(jj = 0; jj < k; jj++)
          {
            if ((i * k + ii > 0) &&
                (i * k + ii < height - 1) &&
                (j * k + jj > 0) &&
                (j * k + jj < width  - 1))
            {
              d = (k * i + ii) * width + (j * k + jj);
              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] +=
                  r[d] * w[ii * 2] * w[jj * 2];
              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                  r[d] * w[ii * 2] * w[jj * 2];
              if ((i + nearest[ii] >= 0) &&
                  (i + nearest[ii] <= sizeY - 1))
              {
                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                  r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                  r[d] * w[ii * 2 + 1] * w[jj * 2 ];
              }
              if ((j + nearest[jj] >= 0) &&
                  (j + nearest[jj] <= sizeX - 1))
              {
                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                  r[d] * w[ii * 2] * w[jj * 2 + 1];
                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                  r[d] * w[ii * 2] * w[jj * 2 + 1];
              }
              if ((i + nearest[ii] >= 0) &&
                  (i + nearest[ii] <= sizeY - 1) &&
                  (j + nearest[jj] >= 0) &&
                  (j + nearest[jj] <= sizeX - 1))
              {
                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                  r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                  r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
              }
            }
          }/*for(jj = 0; jj < k; jj++)*/
        }/*for(ii = 0; ii < k; ii++)*/
      }/*for(j = 0; j < sizeX; j++)*/
    }/*for(i = 0; i < sizeY; i++)*/

    cvReleaseImage(&dx);
    cvReleaseImage(&dy);


    free(w);
    free(nearest);

    free(r);
    free(alfa);

    return LATENT_SVM_OK;
}
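A hypothetical call site for getFeatureMaps, assuming the OpenCV latentsvm internals it relies on (CvLSVMFeatureMap, LATENT_SVM_OK, and the matching freeFeatureMapObject) are available from the same module:

IplImage* img = cvLoadImage("frame.png", CV_LOAD_IMAGE_COLOR); // hypothetical input file
CvLSVMFeatureMap* map = NULL;
if (img && getFeatureMaps(img, 8 /* cell size k */, &map) == LATENT_SVM_OK)
{
    // map->map now holds sizeX * sizeY cells, each with 3 * NUM_SECTOR
    // gradient-orientation features
    freeFeatureMapObject(&map);
}
cvReleaseImage(&img);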
Example 3
int main(int argc, char * argv[])
{
  static int framecounter=0;
  const CvSize imsize = cvSize(320,240);
  cvNamedWindow("Test");

  CvParticleState state;
  CvParticleObserve observe; 
  observe.cvParticleObserveInitialize("../data/pcaval.xml","../data/pcavec.xml","../data/pcaavg.xml");
  CvHaarCascadeDetector detector;
  detector.load();
  //CvAbstractTracker tracker;
  CvHandTracker tracker;

  CvCapture * capture = NULL;
  if (argc==1) {
    capture = cvCreateCameraCapture(0);
    // set resolution to 320x240
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
  }else{
    capture = cvCreateFileCapture(argv[1]);
  }
  if (!capture) { fprintf(stderr, "Error: failed to open source video!\n"); return 1; }

  static CvRect ROIs[50];
  CvParticle *particle = cvCreateParticle( 5/*num_states*/, 100/*num_particles*/, true/*logprob*/ );
  bool pf_initialized=false;

  static int delay = 0; framecounter=350;
  cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);
  stMouseParam mouseParam;
  cvSetMouseCallback("Test",cbMouse, &mouseParam);
  while(1)
  {
    if (0){
      cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
    }else{
      framecounter++;
    }
    IplImage * rawImage = cvQueryFrame(capture);
    if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
    if (tracker.initialized()){
      tracker.update(rawImage);
    }else{
      tracker.initialize(rawImage);
      tracker.m_framecounter=framecounter;
    }
  

    // START PROCESSING HERE
    {
      // draw face rectangles
      mouseParam.img = rawImage;
      float points_data[2];
      CvMat points = cvMat(1, 1, CV_32FC2, points_data);
      points.data.fl[0] = mouseParam.pt.x;
      points.data.fl[1] = mouseParam.pt.y;
      cvOpticalFlowPointTrack(tracker.m_currImage, tracker.m_nextImage, &points, cvSize(10, 10), 2);

      {
        IplImage * dispImage = cvCloneImage(rawImage);
        // the matrix holds floats, so read data.fl; reading data.i would
        // reinterpret the float bits as integers
        cvCircle(dispImage, cvPoint(cvRound(points.data.fl[0]), cvRound(points.data.fl[1])), 2, CV_RED, -1);
        // draw frame count
        cvDrawFrameCount(dispImage, framecounter);
        // show
        cvShowImage("Test", dispImage); CV_WAIT2(10);
        cvReleaseImageEx(dispImage);
      }
    }
    
    int key = cvWaitKey(delay)&0xff;
    if (key==27){
      break;
    }else if (key==' '){
      if (delay){ delay = 0; }else{ delay = 30; }
    }else if (key=='f'){ // skip to next frame
    }else if (key!=0xff){
      fprintf(stderr, "Warning: Unknown key press : %c\n", key);
    } // end of key press processing
  } // end of video

  cvReleaseCapture(&capture);
  cvDestroyWindow("Test");
  return 0;
}
Example 4
CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
                                CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                const CvArr* mask CV_DEFAULT(0) );

CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
                             CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );

CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
                                    CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                    const CvArr* mask CV_DEFAULT(0) );

CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
                               float perimScale CV_DEFAULT(4.f),
                               CvMemStorage* storage CV_DEFAULT(0),
                               CvPoint offset CV_DEFAULT(cvPoint(0,0)));

#ifdef __cplusplus
}

namespace cv
{

/*!
 The Base Class for Background/Foreground Segmentation
 
 The class is only used to define the common interface for
 the whole family of background/foreground segmentation algorithms.
*/
class CV_EXPORTS_W BackgroundSubtractor
{
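The class declaration is cut off here. For the codebook functions declared above, a minimal sketch of the usual workflow, assuming cvCreateBGCodeBookModel()/cvReleaseBGCodeBookModel() from the same legacy header, an 8-bit YCrCb input yuvImage, and an 8-bit single-channel fgmask of the same size:

CvBGCodeBookModel* model = cvCreateBGCodeBookModel();
model->modMin[0] = model->modMin[1] = model->modMin[2] = 3;  // per-channel learning tolerance
model->modMax[0] = model->modMax[1] = model->modMax[2] = 10;
model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

// learn the background over the first N frames...
cvBGCodeBookUpdate(model, yuvImage, cvRect(0, 0, 0, 0), NULL);

// ...then drop stale codebook entries once and start segmenting
cvBGCodeBookClearStale(model, model->t / 2, cvRect(0, 0, 0, 0), NULL);
cvBGCodeBookDiff(model, yuvImage, fgmask, cvRect(0, 0, 0, 0));
cvSegmentFGMask(fgmask, 1, 4.f, NULL, cvPoint(0, 0));
cvReleaseBGCodeBookModel(&model);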
Example 5
 void MarkFace(cv::Mat &source, const cv::Rect &face)
 {
     CvPoint topLeft = cvPoint(face.x, face.y);
     CvPoint downRight = cvPoint(face.x + face.width-1, face.y + face.height-1);
     rectangle( source, topLeft, downRight, CV_RGB(0,0,255));  
 }
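A hypothetical call site for MarkFace, assuming the faces were detected with a cv::CascadeClassifier (cascade, grayFrame, and frame are stand-in names):

 std::vector<cv::Rect> faces;
 cascade.detectMultiScale(grayFrame, faces);
 for (size_t i = 0; i < faces.size(); ++i)
     MarkFace(frame, faces[i]);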
Example 6
void CvCalibFilter::DrawPoints(CvMat** dstarr) {
    int i, j;

    if (!dstarr) {
        assert(0);
        return;
    }

    if (latestCounts) {
        for (i = 0; i < cameraCount; i++) {
            if (dstarr[i] && latestCounts[i]) {
                CvMat dst_stub, *dst;
                int count = 0;
                bool found = false;
                CvPoint2D32f* pts = 0;

                GetLatestPoints(i, &pts, &count, &found);

                dst = cvGetMat(dstarr[i], &dst_stub);

                static const CvScalar line_colors[] = {
                    {{0, 0, 255}},
                    {{0, 128, 255}},
                    {{0, 200, 200}},
                    {{0, 255, 0}},
                    {{200, 200, 0}},
                    {{255, 0, 0}},
                    {{255, 0, 255}}
                };

                const int colorCount = sizeof(line_colors) / sizeof(line_colors[0]);
                const int r = 4;
                CvScalar color = line_colors[0];
                CvPoint prev_pt = { 0, 0};

                for (j = 0; j < count; j++) {
                    CvPoint pt;
                    pt.x = cvRound(pts[j].x);
                    pt.y = cvRound(pts[j].y);

                    if (found) {
                        if (etalonType == CV_CALIB_ETALON_CHESSBOARD) {
                            color = line_colors[(j/cvRound(etalonParams[0])) % colorCount];
                        } else {
                            color = CV_RGB(0, 255, 0);
                        }

                        if (j != 0) {
                            cvLine(dst, prev_pt, pt, color, 1, CV_AA);
                        }
                    }

                    cvLine(dst, cvPoint(pt.x - r, pt.y - r),
                           cvPoint(pt.x + r, pt.y + r), color, 1, CV_AA);

                    cvLine(dst, cvPoint(pt.x - r, pt.y + r),
                           cvPoint(pt.x + r, pt.y - r), color, 1, CV_AA);

                    cvCircle(dst, pt, r + 1, color, 1, CV_AA);

                    prev_pt = pt;
                }
            }
        }
    }
}
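A rough usage sketch for the legacy cvaux CvCalibFilter class this method belongs to, assuming a single camera and a chessboard etalon whose parameters are {rows, cols, square size} (frameMat is a stand-in for a captured view):

CvCalibFilter calib;
double etalonParams[] = { 6, 8, 1.0 };
calib.SetCameraCount(1);
calib.SetEtalon(CV_CALIB_ETALON_CHESSBOARD, etalonParams);

CvMat* views[] = { frameMat };   // one view per camera
if (calib.FindEtalon(views))
    calib.DrawPoints(views);     // overlays the detected corners as drawn above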
Example 7
void AirCursor::analyzeGrab()
{
    cvClearMemStorage(m_cvMemStorage);

    // get current depth map from Kinect
    const XnDepthPixel* depthMap = m_depthGenerator.GetDepthMap();

    // convert 16bit openNI depth map to 8bit IplImage used in opencv processing
    int origDepthIndex = 0;
    char* depthPtr = m_iplDepthMap->imageData;
    char* debugPtr = 0;
    if (m_debugImageEnabled) debugPtr = m_iplDebugImage->imageData;
    for (unsigned int y = 0; y < DEPTH_MAP_SIZE_Y; y++)
    {
        for (unsigned int x = 0; x < DEPTH_MAP_SIZE_X; x++)
        {
            // get current depth value from original depth map
            short depth = depthMap[origDepthIndex];

            // check that current value is in the allowed range determined by clipping distances,
            // and if it is map it to range 0 - 255 so that 255 is the closest value
            unsigned char pixel = 0;
            if (depth >= NEAR_CLIPPING_DISTANCE && depth <= FAR_CLIPPING_DISTANCE)
            {
                depth -= NEAR_CLIPPING_DISTANCE;
                pixel = 255 - (255.0f * ((float)depth / (FAR_CLIPPING_DISTANCE - NEAR_CLIPPING_DISTANCE)));
            }
            else {
                pixel = 0;
            }
            m_iplDepthMap->imageData[y * m_iplDepthMap->widthStep + x] = pixel;
            *depthPtr = pixel;

            if (m_debugImageEnabled)
            {
                // init debug image with the same depth map
                *(debugPtr + 0) = pixel;
                *(debugPtr + 1) = pixel;
                *(debugPtr + 2) = pixel;
                debugPtr += 3;
            }
            origDepthIndex++;
            depthPtr++;
        }
    }

    // calculate region of interest corner points in real world coordinates
    XnPoint3D rwPoint1 = m_handPosRealWorld;
    rwPoint1.X -= HAND_ROI_SIZE_LEFT;
    rwPoint1.Y += HAND_ROI_SIZE_UP;
    XnPoint3D rwPoint2 = m_handPosRealWorld;
    rwPoint2.X += HAND_ROI_SIZE_RIGHT;
    rwPoint2.Y -= HAND_ROI_SIZE_DOWN;

    // convert corner points to projective coordinates
    XnPoint3D projPoint1, projPoint2;
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwPoint1, &projPoint1);
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwPoint2, &projPoint2);

    // round projected corner points to ints and clip them against the depth map
    int ROItopLeftX = qRound(projPoint1.X); int ROItopLeftY = qRound(projPoint1.Y);
    int ROIbottomRightX = qRound(projPoint2.X); int ROIbottomRightY = qRound(projPoint2.Y);
    if (ROItopLeftX < 0) ROItopLeftX = 0; else if (ROItopLeftX > DEPTH_MAP_SIZE_X - 1) ROItopLeftX = DEPTH_MAP_SIZE_X - 1;
    if (ROItopLeftY < 0) ROItopLeftY = 0; else if (ROItopLeftY > DEPTH_MAP_SIZE_Y - 1) ROItopLeftY = DEPTH_MAP_SIZE_Y - 1;
    if (ROIbottomRightX < 0) ROIbottomRightX = 0; else if (ROIbottomRightX > DEPTH_MAP_SIZE_X - 1) ROIbottomRightX = DEPTH_MAP_SIZE_X - 1;
    if (ROIbottomRightY < 0) ROIbottomRightY = 0; else if (ROIbottomRightY > DEPTH_MAP_SIZE_Y - 1) ROIbottomRightY = DEPTH_MAP_SIZE_Y - 1;

    // set region of interest
    CvRect rect = cvRect(ROItopLeftX, ROItopLeftY, ROIbottomRightX - ROItopLeftX, ROIbottomRightY - ROItopLeftY);
    if(rect.height > 0 && rect.width > 0)
    {
        cvSetImageROI(m_iplDepthMap, rect);
        if (m_debugImageEnabled) cvSetImageROI(m_iplDebugImage, rect);
    }

    // use depth threshold to isolate hand
    // as a center point of thresholding, it seems that it's better to use a point bit below
    // the point Nite gives as the hand point
    XnPoint3D rwThresholdPoint = m_handPosRealWorld; rwThresholdPoint.Y -= 30;
    XnPoint3D projThresholdPoint;
    m_depthGenerator.ConvertRealWorldToProjective(1, &rwThresholdPoint, &projThresholdPoint);
    int lowerBound = (unsigned char)m_iplDepthMap->imageData[(int)projThresholdPoint.Y * DEPTH_MAP_SIZE_X + (int)projThresholdPoint.X] - DEPTH_THRESHOLD;
    if (lowerBound < 0) lowerBound = 0;
    cvThreshold( m_iplDepthMap, m_iplDepthMap, lowerBound, 255, CV_THRESH_BINARY );

    // color used for drawing the hand in the debug image, green for normal and red for grab.
    // color lags one frame from actual grab status but in practice that shouldn't be too big of a problem
    int rCol, gCol, bCol;
    if(m_grabbing) {
        rCol = 255; gCol = 0; bCol = 0;
    }
    else {
        rCol = 0; gCol = 255; bCol = 0;
    }

    // go through the ROI and paint hand on debug image with current grab status color
    if (m_debugImageEnabled)
    {
        // index of first pixel in the ROI
        int startIndex = ROItopLeftY * m_iplDepthMap->widthStep + ROItopLeftX;

        depthPtr = &(m_iplDepthMap->imageData[startIndex]);
        debugPtr = &(m_iplDebugImage->imageData[startIndex * 3]);

        // how much index needs to increase when moving to next line
        int vertInc = m_iplDepthMap->widthStep - (ROIbottomRightX - ROItopLeftX);

        for (int y = ROItopLeftY; y < ROIbottomRightY; y++)
        {
            for (int x = ROItopLeftX; x < ROIbottomRightX; x++)
            {
                if((unsigned char)*depthPtr > 0)
                {
                    *(debugPtr + 0) = rCol / 2;
                    *(debugPtr + 1) = gCol / 2;
                    *(debugPtr + 2) = bCol / 2;
                }

                // next pixel
                depthPtr++;
                debugPtr += 3;
            }

            // next line
            depthPtr += vertInc;
            debugPtr += vertInc * 3;
        }
    }

    // find contours in the hand and draw them on debug image
    CvSeq* contours = 0;
    cvFindContours(m_iplDepthMap, m_cvMemStorage, &contours, sizeof(CvContour));
    if (m_debugImageEnabled)
    {
        if(contours)
        {
            cvDrawContours(m_iplDebugImage, contours, cvScalar(rCol, gCol , bCol), cvScalar(rCol, gCol, bCol), 1);
        }
    }

    // go through contours and search for the biggest one
    CvSeq* biggestContour = 0;
    double biggestArea = 0.0;
    for(CvSeq* currCont = contours; currCont != 0; currCont = currCont->h_next)
    {
        // ignore small contours which are most likely caused by artifacts
        double currArea = cvContourArea(currCont);
        if(currArea < CONTOUR_MIN_SIZE) continue;

        if(!biggestContour || currArea > biggestArea) {
            biggestContour = currCont;
            biggestArea = currArea;
        }
    }

    int numOfValidDefects = 0;

    if(biggestContour)
    {
        // calculate convex hull of the biggest contour found which is hopefully the hand
        CvSeq* hulls = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 0);

        if (m_debugImageEnabled)
        {
            // calculate convex hull and return it in a different form.
            // only required for drawing
            CvSeq* hulls2 = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 1);

            // draw the convex hull
            cvDrawContours(m_iplDebugImage, hulls2, cvScalar(rCol, gCol , bCol), cvScalar(rCol, gCol, bCol), 1);
        }

        // calculate convexity defects of hand's convex hull
        CvSeq* defects = cvConvexityDefects(biggestContour, hulls, m_cvMemStorage);

        int numOfDefects = defects->total;

        if (numOfDefects > 0)
        {
            // calculate defect min size in projective coordinates.
            // this is done using a vector from current hand position to a point DEFECT_MIN_SIZE amount above it.
            // that vector is converted to projective coordinates and it's length is calculated.
            XnPoint3D rwTempPoint = m_handPosRealWorld;
            rwTempPoint.Y += DEFECT_MIN_SIZE;
            XnPoint3D projTempPoint;
            m_depthGenerator.ConvertRealWorldToProjective(1, &rwTempPoint, &projTempPoint);
            int defectMinSizeProj = m_handPosProjected.Y - projTempPoint.Y;

            // convert opencv seq to array
            CvConvexityDefect* defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect) * numOfDefects);
            cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);

            for(int i = 0; i < numOfDefects; i++)
            {
                // ignore too small defects
                if((defectArray[i].depth) < defectMinSizeProj)
                {
                   continue;
                }

               numOfValidDefects++;

               if (m_debugImageEnabled)
               {
                   // draw blue point to defect
                   cvCircle(m_iplDebugImage, *(defectArray[i].depth_point), 5, cvScalar(0, 0, 255), -1);
                   cvCircle(m_iplDebugImage, *(defectArray[i].start), 5, cvScalar(0, 0, 255), -1);
                   cvCircle(m_iplDebugImage, *(defectArray[i].end), 5, cvScalar(0, 0, 255), -1);
               }
            }

            free(defectArray);
        }
    }

    if (m_debugImageEnabled)
    {
        cvResetImageROI(m_iplDebugImage);

        // draw white dot on current hand position
        cvCircle(m_iplDebugImage, cvPoint(m_handPosProjected.X, m_handPosProjected.Y), 5, cvScalar(255, 255, 255), -1);

        // draw gray dot on current center of threshold position
        //cvCircle(m_iplDebugImage, cvPoint(projThresholdPoint.X, projThresholdPoint.Y), 5, cvScalar(127, 127, 127), -1);

        // draw ROI with green
        //cvRectangle(m_iplDebugImage, cvPoint(ROItopLeftX, ROItopLeftY), cvPoint(ROIbottomRightX, ROIbottomRightY), cvScalar(0, 255, 0));
    }

    // determine current grab status based on defect count
    if(numOfValidDefects <= GRAB_MAX_DEFECTS)
    {
        m_currentGrab = true;
    }
    else
    {
        m_currentGrab = false;
    }

    if (m_debugImageEnabled)
    {
        // debug strings
        QList<QString> debugStrings;
        debugStrings.push_back(QString("hand distance: " + QString::number(m_handPosRealWorld.Z) + " mm").toStdString().c_str());
        debugStrings.push_back(QString("defects: " + QString::number(numOfValidDefects)).toStdString().c_str());

        // convert iplDebugImage to QImage
        char* scanLinePtr = m_iplDebugImage->imageData;
        for (int y = 0; y < DEPTH_MAP_SIZE_Y; y++) {
            memcpy(m_debugImage->scanLine(y), scanLinePtr, DEPTH_MAP_SIZE_X * 3);
            scanLinePtr += DEPTH_MAP_SIZE_X * 3;
        }

        emit debugUpdate(*m_debugImage, debugStrings);
    }
}
Example 8
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <vl/sift.h>

// debug-print macro assumed to be project-local; mapped to printf here
#define dprintf printf

int main (int argc, const char * argv[]) {
	char *imagefilename = (char*)malloc(sizeof(char) * 256); // large enough for argv paths
	char *dscfilename = (char*)malloc(sizeof(char) * 256);
	if (argc<3) {
		printf("Usage: ./dump-descr image-file-name descriptor-file-name");
		strcpy(imagefilename, "savekkkk.jpg");
		strcpy(dscfilename, "saveD.jpg.dsc");
	}
	else {
		strcpy(imagefilename,argv[1]);
		strcpy(dscfilename,argv[2]);
	}
	
	FILE* dscfile;
	int w=1280,h=720;
	int i=0;
	int nkeypoints=0;
	vl_bool render=1;
	vl_bool first=1;
	VlSiftFilt * myFilter=0;
	VlSiftKeypoint const* keys;
	char img2_file[] = "/Users/quake0day/ana2/MVI_0124.MOV";
	
	//printf("sizeof(VlSiftKeypoint)=%d, filt=%d, pix=%d\n", sizeof(VlSiftKeypoint), sizeof(VlSiftFilt),sizeof(vl_sift_pix));
	
	dscfile = fopen(dscfilename, "wb");
	if (!dscfile) {
		fprintf(stderr, "Error: cannot open %s for writing\n", dscfilename);
		return 1;
	}
	vl_sift_pix* fim;
	int err=0;
	int octave, nlevels, o_min;
	
	//vl_sift_pix descr[128];
	
	
	//CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
	CvCapture * camera = cvCreateFileCapture(img2_file);

	cvNamedWindow("Hello", 1);
	
	IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage(imagefilename, 0);
	
	IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
	IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
	octave=3;
	nlevels=10;
	o_min=1;
	myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
	vl_sift_set_peak_thresh(myFilter, 0.5);
	fim=malloc(sizeof(vl_sift_pix)*w*h);
	int press=0;
	
	while (myCVImage) {
		
		dprintf("%d*%d\n",myCVImage->width,myCVImage->height);
		//w=myCVImage->width;
		//h=myCVImage->height;
		
		cvResize(myCVImage, resizingImg, CV_INTER_AREA);
		dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
		cvConvertImage(resizingImg, afterCVImage, 0);
		
		
		for (i=0; i<h; i++) {
			for (int j=0; j<w; j++) {
				fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
				//printf("%f ", fim[i*w+j]);
			}
		}
		
		
		//vl_sift_set_peak_thresh(myFilter, 0.5);
		//vl_sift_set_edge_thresh(myFilter, 10.0);
		first=1;
		while (1) {
			if (first) {
				first=0;
				err=vl_sift_process_first_octave(myFilter, fim);
			}
			else {
				err=vl_sift_process_next_octave(myFilter);
			}
			if (err) {
				err=VL_ERR_OK;
				break;
			}
			
			vl_sift_detect(myFilter);
			nkeypoints=vl_sift_get_nkeypoints(myFilter);
			dprintf("insider numkey:%d\n",nkeypoints);
			keys=vl_sift_get_keypoints(myFilter);
			dprintf("final numkey:%d\n",nkeypoints);
			
			
			if (render) {
				for (i=0; i<nkeypoints; i++) {
					cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 255, 50, 0), 1, CV_AA, 0);
					//printf("x:%f,y:%f,s:%f,sigma:%f,\n",keys->x,keys->y,keys->s,keys->sigma);
					if (press=='d') {
						
						double angles [4] ;
						int nangles ;
						
						/* obtain keypoint orientations ........................... */
						nangles=vl_sift_calc_keypoint_orientations(myFilter, angles, keys);
						
						/* for each orientation ................................... */
						for (int q = 0 ; q < (unsigned) nangles ; ++q) {
							vl_sift_pix descr [128] ;
							
							
							//printf("\n");
							/* compute descriptor (if necessary) */
							vl_sift_calc_keypoint_descriptor(myFilter, descr, keys, angles[q]);
							for (int j=0; j<128; j++) {
								descr[j]*=512.0;
								descr[j]=(descr[j]<255.0)?descr[j]:255.0;
								printf("%f ", descr[j]);
							}
							fwrite(descr, sizeof(vl_sift_pix), 128, dscfile);
						}
					}
					keys++;
				}
			}
			
		}
		
		cvShowImage("Hello", resizingImg);
		
		myCVImage = cvQueryFrame(camera);
		
		press=cvWaitKey(1);
		if( press=='q' )
			return 0;
		else if( press=='r' )
			render=1-render;
	}
	free(fim);
	cvReleaseImage(&afterCVImage);
	cvReleaseImage(&resizingImg);
	cvReleaseImage(&myCVImage);
	
	return 0;
}
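The .dsc file written above is a plain stream of 128-float descriptor records, so reading it back is straightforward. A minimal reader sketch (the file name is whatever was passed as dscfilename):

FILE* f = fopen("saveD.jpg.dsc", "rb");
vl_sift_pix descr[128];
while (f && fread(descr, sizeof(vl_sift_pix), 128, f) == 128) {
	// process one 128-dimensional SIFT descriptor
}
if (f) fclose(f);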
Example 9
// chain function - this function does the actual processing
static GstFlowReturn
gst_bgfg_acmmm2003_chain(GstPad *pad, GstBuffer *buf)
{
    GstBgFgACMMM2003 *filter;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_BGFG_ACMMM2003(GST_OBJECT_PARENT(pad));

    filter->image->imageData = (gchar*) GST_BUFFER_DATA(buf);

    // the bg model must be initialized with a valid image; thus we delay its
    // creation until the chain function
    if (filter->model == NULL) {
        filter->model = cvCreateFGDStatModel(filter->image, NULL);

        ((CvFGDStatModel*)filter->model)->params.minArea           = filter->min_area;
        ((CvFGDStatModel*)filter->model)->params.erode_iterations  = filter->n_erode_iterations;
        ((CvFGDStatModel*)filter->model)->params.dilate_iterations = filter->n_dilate_iterations;

        return gst_pad_push(filter->srcpad, buf);
    }

    cvUpdateBGStatModel(filter->image, filter->model, -1);

    // send mask event, if requested
    if (filter->send_mask_events) {
        GstStructure *structure;
        GstEvent     *event;
        GArray       *data_array;
        IplImage     *mask;

        // prepare and send custom event with the mask surface
        mask = filter->model->foreground;
        data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
        g_array_append_vals(data_array, mask->imageData, mask->imageSize);

        structure = gst_structure_new("bgfg-mask",
                                      "data",      G_TYPE_POINTER, data_array,
                                      "width",     G_TYPE_UINT,    mask->width,
                                      "height",    G_TYPE_UINT,    mask->height,
                                      "depth",     G_TYPE_UINT,    mask->depth,
                                      "channels",  G_TYPE_UINT,    mask->nChannels,
                                      "timestamp", G_TYPE_UINT64,  GST_BUFFER_TIMESTAMP(buf),
                                      NULL);

        event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
        gst_pad_push_event(filter->srcpad, event);
        g_array_unref(data_array);

        if (filter->display) {
            // shade the regions not selected by the acmmm2003 algorithm
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
            cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
        }
    }

    if (filter->send_roi_events) {
        CvSeq        *contour;
        CvRect       *bounding_rects;
        guint         i, j, n_rects;

        // count # of contours, allocate array to store the bounding rectangles
        for (contour = filter->model->foreground_regions, n_rects = 0;
             contour != NULL;
             contour = contour->h_next, ++n_rects);

        bounding_rects = g_new(CvRect, n_rects);

        for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
            bounding_rects[i] = cvBoundingRect(contour, 0);

        for (i = 0; i < n_rects; ++i) {
            // skip collapsed rectangles
            if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;

            for (j = (i + 1); j < n_rects; ++j) {
                // skip collapsed rectangles
                if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;

                if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
                    bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
                    bounding_rects[j] = NULL_RECT;
                }
            }
        }

        for (i = 0; i < n_rects; ++i) {
            GstEvent     *event;
            GstStructure *structure;
            CvRect        r;

            // skip collapsed rectangles
            r = bounding_rects[i];
            if ((r.width == 0) || (r.height == 0)) continue;

            structure = gst_structure_new("bgfg-roi",
                                          "x",         G_TYPE_UINT,   r.x,
                                          "y",         G_TYPE_UINT,   r.y,
                                          "width",     G_TYPE_UINT,   r.width,
                                          "height",    G_TYPE_UINT,   r.height,
                                          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
                                          NULL);

            event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
            gst_pad_send_event(filter->sinkpad, event);

            if (filter->verbose)
                GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
                         r.x, r.y, r.width, r.height);

            if (filter->display)
                cvRectangle(filter->image, cvPoint(r.x, r.y), cvPoint(r.x + r.width, r.y + r.height),
                            CV_RGB(0, 0, 255), 1, 0, 0);
        }

        g_free(bounding_rects);
    }

    if (filter->display)
        gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);

    return gst_pad_push(filter->srcpad, buf);
}
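A hypothetical sketch of a downstream handler that reads one of the custom "bgfg-roi" events back out, using the same GStreamer 0.10-era API as the element above:

static gboolean
handle_bgfg_event(GstPad *pad, GstEvent *event)
{
    const GstStructure *s = gst_event_get_structure(event);
    if (s && gst_structure_has_name(s, "bgfg-roi")) {
        guint x, y, width, height;
        gst_structure_get_uint(s, "x", &x);
        gst_structure_get_uint(s, "y", &y);
        gst_structure_get_uint(s, "width", &width);
        gst_structure_get_uint(s, "height", &height);
        GST_INFO("got ROI: x=%u y=%u %ux%u", x, y, width, height);
    }
    return gst_pad_event_default(pad, event);
}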
Example 10
int WINAPI WinMain(HINSTANCE hThisInstance, HINSTANCE hPrevInstance, LPSTR lpszArgs, int nWinMode)
{
	// variables for storing images
	IplImage *frame = 0, *image = 0, *hsv = 0, *dst = 0, *dst2 = 0, *color_indexes = 0, *dst3 = 0, *image2 = 0, *tmp = 0;
	int key = 0, zx = 0, zy = 0;

	// load a picture from a file
	IplImage *menu = cvLoadImage("menu.png");
	// create the project's main window
	cvNamedWindow("OpenCV Project");
	cvShowImage("OpenCV Project", menu);
	cvMoveWindow("OpenCV Project", 100, 50);

	// grab any connected webcam
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);

	// frame rate
	double fps = 18;
	// initialize video recording to a file; the four-character codec code is built with the CV_FOURCC macro
	CvVideoWriter *writer = cvCreateVideoWriter("record.avi", CV_FOURCC('I','Y','U','V'), fps, cvSize(640, 480), 1);

	if (!capture)
		return 0;
	else
	{
		while(key != 27)
		{
			// grab the current frame
			frame = cvQueryFrame(capture);
			// clone it for processing
			image = cvCloneImage(frame);
	
			// zoom
			if(key=='+')
			{
					zx = zx + 4;
					zy = zy + 3;
			}
			if(key=='-')
			{
					zx = zx - 4;
					zy = zy - 3;
			}
			if(zx > 300)
			{
					zx = 300;
					zy = 225;
			}
			if(zx < 0)
			{
					zx = 0;
					zy = 0;
			}

			// set the ROI width and height
			int zwidth = frame->width-2*zx; 
			int zheight = frame->height-2*zy;

			// set the ROI (Region Of Interest)
			cvSetImageROI(frame, cvRect(zx,zy,zwidth,zheight));
			// copy the region of interest into image2
			image2 = cvCloneImage(frame); 
			// create an empty 640x480 image
			tmp = cvCreateImage( cvSize(640, 480), frame->depth, frame->nChannels );
			// stretch the ROI onto the empty image tmp
			cvResize(image2, tmp, 0);

			// write the frame to the video file
            cvWriteFrame(writer, tmp);

			// reset the ROI
			cvResetImageROI(frame);

			// initialize the font
			CvFont font;
			cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX,1.0, 1.0, 0, 1, CV_AA);
			// draw text onto the image using the font
			cvPutText(tmp, "press '+' to increase", cvPoint(150, 40), &font, CV_RGB(150, 0, 150) );
			cvPutText(tmp, "press '-' to reduce", cvPoint(165, 450), &font, CV_RGB(150, 0, 150) );

			// number of pixels of each color in the image
			uint colorCount[NUM_COLOR_TYPES] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

			hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			cvCvtColor( image, hsv, CV_BGR2HSV );

			// images for storing the results
			dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
			color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // for storing the color indexes

			// for storing the RGB colors
			CvScalar rgb_colors[NUM_COLOR_TYPES];

			int i=0, j=0, x=0, y=0;

			// zero out the colors
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					rgb_colors[i] = cvScalarAll(0);
			}

			for (y=0; y<hsv->height; y++) {
					for (x=0; x<hsv->width; x++) {

							// get the pixel's HSV components
							uchar H = CV_PIXEL(uchar, hsv, x, y)[0];        // Hue
							uchar S = CV_PIXEL(uchar, hsv, x, y)[1];        // Saturation
							uchar V = CV_PIXEL(uchar, hsv, x, y)[2];        // Value (Brightness)

							// determine which color type these values map to
							int ctype = getPixelColorType(H, S, V);

							// set that color in the debug image
							CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];  // Hue
							CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];  // Saturation
							CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];  // Value

							// accumulate the RGB components
							rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
							rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
							rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

							// remember which color type this pixel belongs to
							CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

							// count it
							colorCount[ctype]++;
					}
			}

			// average the RGB components
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					if (colorCount[i] == 0) continue; // avoid dividing by zero for colors absent from the frame
					rgb_colors[i].val[0] /= colorCount[i];
					rgb_colors[i].val[1] /= colorCount[i];
					rgb_colors[i].val[2] /= colorCount[i];
			}

			// now move the array into a vector and sort it
			std::vector< std::pair< int, uint > > colors;
			colors.reserve(NUM_COLOR_TYPES);

			for(i=0; i<NUM_COLOR_TYPES; i++){
					std::pair< int, uint > color;
					color.first = i;
					color.second = colorCount[i];
					colors.push_back( color );
			}
		
			// sort by pixel count
			std::sort( colors.begin(), colors.end(), colors_sort );

			// show the colors
			cvZero(dst2);
			int h = dst2->height;
			int w = dst2->width / RECT_COLORS_SIZE;
			for(i=0; i<RECT_COLORS_SIZE; i++ ){
					cvRectangle(dst2, cvPoint(i*w, 0), cvPoint(i*w+w, h), rgb_colors[colors[i].first], -1);
			}

			// show the image rendered in the detected colors
			dst3 = cvCloneImage(image);
			for (y=0; y<dst3->height; y++) {
					for (x=0; x<dst3->width; x++) {
							int color_index = CV_PIXEL(uchar, color_indexes, x, y)[0];

							CV_PIXEL(uchar, dst3, x, y)[0] = rgb_colors[color_index].val[0];
							CV_PIXEL(uchar, dst3, x, y)[1] = rgb_colors[color_index].val[1];
							CV_PIXEL(uchar, dst3, x, y)[2] = rgb_colors[color_index].val[2];
					}
			}

			// convert the debug image back to RGB
			cvCvtColor( dst, dst, CV_HSV2BGR );

			cvSetMouseCallback("OpenCV Project", ClickOnMenu, (void*) menu);

			if(flag_1 == 1)
			{
				cvNamedWindow("Webcam", CV_WINDOW_AUTOSIZE);
				cvShowImage("Webcam", image);
			}
			else cvDestroyWindow("Webcam");
			if(flag_2 == 1)
			{
				cvNamedWindow("Zoom", CV_WINDOW_AUTOSIZE);
				cvShowImage("Zoom", tmp);
			}
			else cvDestroyWindow("Zoom");
			if(flag_3 == 1)
			{
				cvNamedWindow("Detected colors");
				cvShowImage("Detected colors", dst2);
			}
			else cvDestroyWindow("Detected colors");
			if(flag_4 == 1)
			{
				cvNamedWindow("Image in detected colors");
				cvShowImage("Image in detected colors", dst3);
			}
			else cvDestroyWindow("Image in detected colors");
			if(flag_5 == 1)
			{
				cvNamedWindow("HSV to RGB");
				cvShowImage("HSV to RGB", dst);
			}
			else cvDestroyWindow("HSV to RGB");
	
			// release resources
			cvReleaseImage(&hsv);
			cvReleaseImage(&dst);
			cvReleaseImage(&dst2);
			cvReleaseImage(&color_indexes);
			cvReleaseImage(&dst3);
			cvReleaseImage(&image);
			cvReleaseImage(&image2);
			cvReleaseImage(&tmp);

			if(flag_exit == 1)
			{
				cvReleaseCapture(&capture);
				cvReleaseVideoWriter(&writer); // закрываем видео-файл
				return 0;
			}

			// exit the loop when ESC is pressed
			key = cvWaitKey(1);
		}

		// release the variables initialized earlier
		cvReleaseCapture(&capture);
		cvReleaseVideoWriter(&writer);

	}
    return 0;
}
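getPixelColorType and colors_sort are defined elsewhere in this project. A plausible comparator for the std::sort call above, assuming the intent is to order colors by descending pixel count:

static bool colors_sort(const std::pair<int, uint>& a,
                        const std::pair<int, uint>& b)
{
	return a.second > b.second; // most frequent color first
}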
Example 11
void CHandDrawEffect::DrawEdge(IplImage* image, IplImage* image2, IplImage* base, int plane)
{
	CvSeq* contourSeq0 = NULL;

	int height    = image->height;
	int width     = image->width;
	int step      = image->widthStep;
	int channels  = image->nChannels;
	uchar* data   = (uchar*)image->imageData;

	if(plane < 3) {
		cvCvtColor(image, hsv, CV_BGR2HSV);				// use one HSV plane as the source for the line drawing
		for(int i = 0; i < height * width; i++)
			grayImage->imageData[i] = hsv->imageData[i * 3 + plane];
	} else {
		cvCvtColor(image, grayImage, CV_BGR2GRAY);		// build a gray image as the source for the line drawing
	}

	IplImage* target = base;					// target image to draw into

	for(int x = 20; x < 240; x += Y) {
		cvThreshold(grayImage, binaryImage, x, 255, CV_THRESH_BINARY);	// binarize at threshold x and extract the outline
		contourSeq0 = 0;
		cvFindContours(binaryImage, memStorage0, &contourSeq0, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0)); // contour search

		if(lineNoise > 0) { // discontinuous-line mode
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {
				CvPoint *p;
				if(contourSeq0->total < X * 5) continue;		// discard small contours (pentagon-sized or less)

				int index = 0;
				for(int i = 0; i < contourSeq0->total; i += X) {
					p = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, i);				// register the point's position and color
					CvScalar color = GetColor(image2, p->x, p->y);
					MulScaler(color, DARK);										// adjust the brightness
					color.val[3] = CheckPoint(image, p->x, p->y, lineNoise);	// judge from nearby pixels whether the point is valid and store the flag in [3]
					SetPoint(index, p, color);									// save into pointTable
					index++;
					if(index > MAX_POINT) {
					//	printf("INDEX ERROR\n"); 
						index = 0;
					}
				}
				// runs of five or fewer consecutive valid points are invalidated (pending: optimize)
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2, p3, p4, p0;
					if(pointTable[p1].color.val[3]) {
						p2 = (p1 + 1) % index;
						p3 = (p1 + 2) % index;
						p4 = (p1 + 3) % index;
						p0 = (p1 - 1 + index) % index;
						if(pointTable[p0].color.val[3]) continue;
						if(!pointTable[p2].color.val[3] ||
							!pointTable[p3].color.val[3] ||
							!pointTable[p4].color.val[3]) {						
							pointTable[p1].color.val[3] = 0;
						}
					}
				}
				// draw the connected valid points
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2 = (i + 1) % index;	// if (p2==index) p2 = 0;
					if(pointTable[p1].color.val[3] && pointTable[p2].color.val[3]) {
						CvScalar c = pointTable[p1].color;
						MulScaler(c, DARK);
						cvLine(target, pointTable[p1].p, pointTable[p2].p, c, lineWidth, CV_AA);
					}
				}
			}
		} else {
			// draw everything
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {

				CvPoint *p1 = 0;
				CvPoint *p2;

				if(contourSeq0->total < X * 5) continue;		

				for(int i = 0; i < contourSeq0->total; i += X) {
					p1 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i) % contourSeq0->total); // start point
					p2 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i + X + Z) % contourSeq0->total); // end point
					CvScalar color = GetColor(image, p1->x, p1->y);
					MulScaler(color, DARK);
					cvLine(target, *p1, *p2, color, lineWidth, CV_AA);
				}
			}
		}
	}
	cvClearMemStorage(memStorage0);
}
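MulScaler, GetColor, CheckPoint, and SetPoint are defined elsewhere in this class. Plausible minimal versions of the first two, assuming MulScaler scales the RGB channels by a brightness factor and GetColor samples a single pixel:

static void MulScaler(CvScalar& c, double s)
{
	c.val[0] *= s; c.val[1] *= s; c.val[2] *= s;
}

static CvScalar GetColor(IplImage* img, int x, int y)
{
	return cvGet2D(img, y, x); // cvGet2D takes (row, col)
}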
Example 12
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <math.h>
#include "Trilateration_2D.h"
double r1, r2, r3;

CvPoint Anchor1 = cvPoint(0, 0);
CvPoint Anchor2 = cvPoint(400, 0);
CvPoint Anchor3 = cvPoint(255, 450);

IplImage* bckgrd = cvCreateImage(cvSize(500, 500), IPL_DEPTH_8U, 3);
double getDistance(CvPoint center, CvPoint tag);
void on_mouseEvent(int event, int x, int y, int flag, void* param);

int main()
{
	r1 = 0;
	r2 = 0;
	r3 = 0;
	//IplImage* bckgrd = cvLoadImage("bckgrd.jpg", CV_LOAD_IMAGE_UNCHANGED);
	cvNamedWindow("trilateration");

	cvSetMouseCallback("trilateration", on_mouseEvent, bckgrd);



	cvShowImage("trilateration", bckgrd);
	cvWaitKey(0);
	cvReleaseImage(&bckgrd);
Example 13
bool CalibrateTarget(
    QLCalibrationId calibrationId,
    QLCalibrationTarget target,
    IplImage* displayImage,
    const char* windowName)
{
    // Loop on this target until eye data was successfully collected for each
    // eye.
    QLCalibrationStatus status = QL_CALIBRATION_STATUS_OK;
    do
    {
	    //Clear the calibration window
	    memset(displayImage->imageData, 128, displayImage->imageSize);

	    // Display the cleared image buffer in the calibration window.
	    cvShowImage(windowName, displayImage);

        // Wait for a little bit so show the blanked screen.
        if(cvWaitKeyEsc == cvWaitKey(100))
            return false;

        // The target positions are in percentage of the area to be tracked so
        // we need to scale to the calibration window size.
        // scale before truncating to int so the fractional part of the
        // percentage isn't lost
        int tx = (int)(target.x * displayImage->width / 100);
        int ty = (int)(target.y * displayImage->height / 100);

	    // Draw a target to the image buffer
	    DrawTarget(displayImage, cvPoint(tx, ty),  20, CV_RGB(0, 255, 0));
    		
	    // Display the image buffer in the calibration window.
	    cvShowImage(windowName, displayImage);

	    // Wait a little bit so the user can see the target before we calibrate.
	    cvWaitKey(250);

        // Calibrate the target for 1500 ms. This can be done two ways: blocking
        // and non-blocking. For blocking, set the block variable to true; for
        // non-blocking, set it to false.
	    bool block = false;
        QLCalibration_Calibrate(calibrationId, target.targetId, 1500, block);

        // When non-blocking is used, the status of the target needs to be
        // polled to determine when it has finished. During the polling we can
        // do other things like querying user input as we do here to see if
        // the user wants to quit the calibration.
        int keyPressed = 0;
        while(!block &&
            ((keyPressed = cvWaitKey(10)) != cvWaitKeyEsc) &&
            (QLCalibration_GetStatus(calibrationId, target.targetId, &status) == QL_ERROR_OK) &&
            (status == QL_CALIBRATION_STATUS_CALIBRATING));

        // If the user terminated the calibration early then return false.
        if(keyPressed == cvWaitKeyEsc)
            return false;

        // Get the status of the target.
        QLCalibration_GetStatus(calibrationId, target.targetId, &status);
    }while(status != QL_CALIBRATION_STATUS_OK);

    // Return true to indicate that the target has successfully been calibrated
    return true;
}
Example 14
bool AutoCalibrate(
    QLDeviceId deviceId, 
    QLCalibrationType calibrationType,  
    QLCalibrationId* calibrationId)
{
    QLError qlerror = QL_ERROR_OK;

    // Initialize the calibration using the supplied data.
    qlerror = QLCalibration_Initialize(deviceId, *calibrationId, calibrationType);

    // If the calibrationId was not valid then create a new calibration
    // container and use it.
    if(qlerror == QL_ERROR_INVALID_CALIBRATION_ID)
    {
        QLCalibration_Create(0, calibrationId);
        qlerror = QLCalibration_Initialize(deviceId, *calibrationId, calibrationType);
    }

    // If the initialization failed then print an error and return false.
    if(qlerror == QL_ERROR_INVALID_DEVICE_ID)
    {
        printf_s("QLCalibration_Initialize() failed with error code %d.\n", qlerror);
        return false;
    }

    // Create a buffer for the targets. This just needs to be large enough to
    // hold the targets. 
    const int bufferSize = 20;
    int numTargets = bufferSize;
    QLCalibrationTarget targets[bufferSize];

    // Get the targets. After the call, numTargets will contain the number of
    // actual targets. 
    qlerror = QLCalibration_GetTargets(*calibrationId, &numTargets, targets);

    // If the buffer was not large enough then print an error and return false.
    if(qlerror == QL_ERROR_BUFFER_TOO_SMALL)
    {
        printf_s(
            "The target buffer is too small. It should be at least %d bytes.\n", 
            numTargets * sizeof(QLCalibrationTarget));
        return false;
    }
    

    // Use OpenCV to create a window for doing the calibration. The calibration
    // will only be valid over the area of this window. If the entire screen
    // area is to be calibrated then this window should be set to the screen
    // size. 
    int windowWidth = 512;
    int windowHeight = 384;
    const char* windowName = "Calibration Window";
    IplImage* displayImage = cvCreateImage(cvSize(windowWidth, windowHeight), 8, 3);
    cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
	cvMoveWindow(windowName, 0, 0);
	cvResizeWindow(windowName, windowWidth, windowHeight);

    //Clear the calibration window
    memset(displayImage->imageData, 128, displayImage->imageSize);

    // Create a font for printing to the window.
    CvFont font;
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX, .5, .5, 0, 1);

    // Print a message to the image.
    int lineSize = 20;
    int lineIndent = 10;
    cvPutText(
        displayImage, 
        "The calibration is dependant on the location of the calibration area.",
        cvPoint(lineIndent, lineSize * 1),
        &font,
        CV_RGB(255, 255, 255));
    cvPutText(
        displayImage, 
        "Move the window to the area of the screen you would like calibrate.",
        cvPoint(lineIndent, lineSize * 2),
        &font,
        CV_RGB(255, 255, 255));
    cvPutText(
        displayImage, 
        "Press ENTER when ready.",
        cvPoint(lineIndent, lineSize * 3),
        &font,
        CV_RGB(255, 255, 255));
    cvPutText(
        displayImage, 
        "Press ESC at any time to terminate the calibration",
        cvPoint(lineIndent, lineSize * 4),
        &font,
        CV_RGB(255, 255, 255));

    // Display the image to the window.
    cvShowImage(windowName, displayImage);

    // Wait for the user to place the window and press a key.
    if(cvWaitKey() == cvWaitKeyEsc)
    {
        QLCalibration_Cancel(*calibrationId);
	    cvReleaseImage(&displayImage);
	    cvDestroyWindow(windowName);
        return false;
    }

    // Loop through each target and calibrate them
	for (int i = 0; i < numTargets; i++) 
	{
		// *Cullen/Capstone* Emitting a beeping noise every time the target needs to be switched.
		printf("\a");

        // Calibrate each target. If the user terminates the calibration then
        // return false.
        if(!CalibrateTarget(
            *calibrationId, 
            targets[i],
            displayImage,
            windowName))
        {
            QLCalibration_Cancel(*calibrationId);
			cvReleaseImage(&displayImage);
			cvDestroyWindow(windowName);
            return false;
        }
            
    }

    // Get the scores and display them. If the user wants to improve the
    // calibration then recalibrate the worst target and loop through again.
    int keyPress = 0;
    do
    { 
        // When all calibration targets have been successfully calibrated then get
        // the scoring. Scores can only be calculated once all targets have been
        // calibrated.
        QLCalibrationScore leftScores[bufferSize];
        QLCalibrationScore rightScores[bufferSize];
	    for (int i = 0; i < numTargets; i++) 
	    {
            QLCalibration_GetScoring(
                *calibrationId, 
                targets[i].targetId, 
                QL_EYE_TYPE_LEFT, 
                leftScores + i);

            QLCalibration_GetScoring(
                *calibrationId, 
                targets[i].targetId, 
                QL_EYE_TYPE_RIGHT, 
                rightScores + i);
        }

        // Display the calibration results graphically.
        DisplayCalibration(displayImage, targets, leftScores, rightScores, numTargets, windowName);

        // Wait for user input to determine what to do.
        keyPress = cvWaitKey();

        // If the user wants to improve the calibration then determine which
        // target has the largest score and recalibrate it.
        if(keyPress == 'i')
        {
            float highestScore = 0;
            int highestIndex = 0;
	        for (int i = 0; i < numTargets; i++) 
	        {
                if(leftScores[i].score > highestScore)
                {
                    highestScore = leftScores[i].score;
                    highestIndex = i;
                }

                if(rightScores[i].score > highestScore)
                {
                    highestScore = rightScores[i].score;
                    highestIndex = i;
                }
            }

            // Calibrate the target. If the user terminates the calibration then
            // return false.
            if(!CalibrateTarget(
                *calibrationId, 
                targets[highestIndex],
                displayImage,
                windowName))
            {
                QLCalibration_Cancel(*calibrationId);
			    cvReleaseImage(&displayImage);
			    cvDestroyWindow(windowName);
                return false;
            }
        }

    }while(keyPress == 'i');

    // If the user would not like to save the calibration then cancel the
    // calibration and return false.
    if(keyPress == cvWaitKeyEsc)
    {
        QLCalibration_Cancel(*calibrationId);
		cvReleaseImage(&displayImage);
		cvDestroyWindow(windowName);
        return false;
    }


    QLCalibration_Finalize(*calibrationId);
	cvReleaseImage(&displayImage);
	cvDestroyWindow(windowName);
    return true;
}
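A hypothetical call site for AutoCalibrate, assuming deviceId was obtained earlier through the QuickLink2 device APIs and a 16-target calibration is wanted:

QLCalibrationId calibrationId = 0;
if (AutoCalibrate(deviceId, QL_CALIBRATION_TYPE_16, &calibrationId))
    printf_s("Calibration %d saved.\n", calibrationId);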
Example 15
static void ocvThread(void){

	ttModels theModels;
	ttInit(&theModels);
		
	static GdkPixbuf *pixbuf;
	
	IplImage *theFrame, *segmented;
	static char theStr[12];
  thePixel=cvPoint(0,0);
  //globalFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
	//char theChar;
	
	#if use_webcam==1
	CvCapture* theCamera;
	CvSize size=cvSize(justWidth,justHeight);
	theCamera=cvCaptureFromCAM(-1);
	cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_WIDTH,justWidth );
	cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_HEIGHT,justHeight );
  theFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
	#else
	theFrame=cvLoadImage("images/image02.jpg",1);
	assert(theFrame!=NULL);
	justWidth=theFrame->width;
	justHeight=theFrame->height;
	CvSize size=cvSize(justWidth,justHeight);
		cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
	#endif
  segmented=cvCreateImage(size,IPL_DEPTH_8U,3);
		
	while (1){
	
	#if use_webcam==1
		theFrame=cvQueryFrame(theCamera);
		
		assert(theFrame!=NULL);
		cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
	#endif
	
		if(changeFlag==1){
			theRanger.hue=-1;
			theH=ttCalibration(theFrame,&thePixel,&theRanger,NULL);
			theRanger.hue=theH;
			changeFlag=0;
		}
		ttCalibration(theFrame,&thePixel,&theRanger,segmented);
		snprintf(theStr, sizeof(theStr), "Hue=%d", theH);
		getIndex();

		
		gdk_threads_enter();
		pixbuf = gdk_pixbuf_new_from_data((guchar*) theFrame->imageData,
		                                  GDK_COLORSPACE_RGB,
		                                  FALSE,
		                                  theFrame->depth,
		                                  theFrame->width,
		                                  theFrame->height,
		                                  theFrame->widthStep,
		                                  NULL,
		                                  NULL);
		gtk_image_set_from_pixbuf(GTK_IMAGE(image), pixbuf);
    
		pixbuf = gdk_pixbuf_new_from_data((guchar*) segmented->imageData,
		                                  GDK_COLORSPACE_RGB,
		                                  FALSE,
		                                  segmented->depth,
		                                  segmented->width,
		                                  segmented->height,
		                                  segmented->widthStep,
		                                  NULL,
		                                  NULL);
		gtk_image_set_from_pixbuf(GTK_IMAGE(gseg), pixbuf);
		gtk_label_set_text((GtkLabel *)hval, theStr);
		gdk_threads_leave();
		#if use_webcam==0
		g_usleep(50000);
		#endif
	}
}
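
/* Sketch, not part of the original source: a small helper that factors out
 * the duplicated IplImage -> GdkPixbuf conversion above. It assumes an
 * 8-bit, 3-channel RGB IplImage whose pixel buffer outlives the pixbuf,
 * since gdk_pixbuf_new_from_data does not copy or own the data. */
static GdkPixbuf *pixbufFromIplImage(const IplImage *img)
{
	return gdk_pixbuf_new_from_data((guchar *) img->imageData,
	                                GDK_COLORSPACE_RGB,
	                                FALSE,           /* no alpha channel */
	                                8,               /* bits per sample */
	                                img->width,
	                                img->height,
	                                img->widthStep,  /* row stride in bytes */
	                                NULL, NULL);     /* no destroy callback */
}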
Esempio n. 16
0
/*
 * Creates a new experiment object and sets values to zero.
 */
Experiment* CreateExperimentStruct() {

	/** Create Experiment Object **/
	Experiment* exp;
	exp = (Experiment*) malloc(sizeof(Experiment));
	if (exp == NULL) return NULL; // allocation failed

	/*************************************/
	/**  Set Everything to zero or NULL **/
	/*************************************/

	/** Simulation? True/False **/
	exp->SimDLP = 0;
	exp->VidFromFile = 0;

	/** GuiWindowNames **/
	exp->WinDisp = NULL;
	exp->WinCon1 = NULL;
	exp->WinCon2 = NULL;
	exp->WinCon3 = NULL;

	/** Error information **/
	exp->e = 0;

	/** CommandLine Input **/
	exp->argv = NULL;
	exp->argc = 0;
	exp->outfname = NULL;
	exp->infname = NULL;
	exp->dirname = NULL;
	exp->protocolfname = NULL;

	/** Protocol Data **/
	exp->p = NULL;
	exp->pflag = 0;

	/** Camera Input**/
	exp->MyCamera = NULL;

	/** FrameGrabber Input **/
	exp->fg = NULL;
	exp->UseFrameGrabber = FALSE;

	/** Video input **/
	exp->capture = NULL;

	/** Last Observed CamFrameNumber **/
	exp->lastFrameSeenOutside = 0;

	/** DLP Output **/
	exp->myDLP = 0;

	/** Calibration Data  Object**/
	exp->Calib = NULL;

	/** User-configurable Worm-related Parameters **/
	exp->Params = NULL;

	/** Information about Our Worm **/
	exp->Worm = NULL;

	/** Information about the Previous frame's Worm **/
	exp->PrevWorm = NULL;

	/** Segmented Worm in DLP Space **/
	exp->segWormDLP = NULL;

	/** internal IplImage **/
	exp->SubSampled = NULL; // Image used to subsample stuff
	exp->HUDS = NULL; //Image used to generate the Heads Up Display
	exp->CurrentSelectedImg = NULL; //The current image selected for display

	/** Internal Frame data types **/
	exp->fromCCD = NULL;
	exp->forDLP = NULL;
	exp->IlluminationFrame = NULL;

	/** Write Data To File **/
	exp->DataWriter = NULL;

	/** Write Video To File **/
	exp->Vid = NULL; //Video Writer
	exp->VidHUDS = NULL;

	/** Timing  Information **/
	exp->now = 0;
	exp->last = 0;

	/** Illumination Timing Info **/
	exp->illumStart = 0;

	/** Illumination Head Tail Timing Info **/
	exp->illumSweepHTtimer=0;

	/** Frame Rate Information **/
	exp->nframes = 0;
	exp->prevFrames = 0;
	exp->prevTime = 0;

	/** Stage Control **/
	exp->stageIsPresent=0;
	exp->stage=NULL;
	exp->stageVel=cvPoint(0,0);
	exp->stageCenter=cvPoint(0,0);
	exp->stageFeedbackTargetOffset=cvPoint(0,0);
	exp->stageIsTurningOff=0;

	/** Macros **/
	exp->RECORDVID = 0;
	exp->RECORDDATA = 0;

	/** Error Handling **/
	exp->e = 0;

	return exp;

}
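
/* Sketch, not from the original source: a matching teardown for the
 * constructor above. The real project presumably releases each member it
 * later allocates; this hypothetical version only guards and frees the
 * struct itself, which is safe right after CreateExperimentStruct() since
 * every member starts out zero or NULL. */
void DestroyExperimentStruct(Experiment* exp) {
	if (exp == NULL)
		return;
	/* release member objects here once they have been allocated */
	free(exp);
}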
Esempio n. 17
0
int Img2IPM::process(Point pt[4], string save_path, int img_cols, int img_rows)
{
	int pointNum = 4;
	Point2f * srcPoint = new Point2f[pointNum];
	Point2f * dstPoint = new Point2f[pointNum];

	CvSize ipmSize;


	//v2 0514
	ipmSize.width = 260;
	ipmSize.height = 300;
	
	//sample4: includes the vehicle front (hood)
	{
		/// sample1 and sample3
		srcPoint[0] = cvPoint(pt[0].x, pt[0].y);
		srcPoint[1] = cvPoint(pt[1].x, pt[1].y);
		srcPoint[2] = cvPoint(pt[2].x, pt[2].y);
		srcPoint[3] = cvPoint(pt[3].x, pt[3].y);

		//v2 0514
		dstPoint[0] = cvPoint(105, 284);
		dstPoint[1] = cvPoint(154, 284);
		dstPoint[2] = cvPoint(154, 11);
		dstPoint[3] = cvPoint(105, 11);
	}

	Mat img2ipmMatrix = getPerspectiveTransform(srcPoint, dstPoint);
	Mat ipm2imgMatrix = getPerspectiveTransform(dstPoint, srcPoint);

	ofstream ipm2iniFile((save_path + "ipm2ini.txt").c_str());
	for (int i = 0; i < ipm2imgMatrix.rows; i++)
	{
		double * aa = ipm2imgMatrix.ptr<double>(i);
		for (int j = 0; j < ipm2imgMatrix.cols; j++)
		{
			ipm2iniFile<<aa[j]<<" ";
		}
		ipm2iniFile<<endl;
	}
	ipm2iniFile.close();

	ofstream ini2ipmFile((save_path + "ini2ipm.txt").c_str());
	for (int i = 0; i < img2ipmMatrix.rows; i++)
	{
		double * aa = img2ipmMatrix.ptr<double>(i);
		for (int j = 0; j < img2ipmMatrix.cols; j++)
		{
			ini2ipmFile<<aa[j]<<" ";
		}
		ini2ipmFile<<endl;
	}
	ini2ipmFile.close();

	

	ofstream outlierFile((save_path +"outlier.txt").c_str());
	vector<Point2f> allIpmPoints;
	vector<Point2f> allIpmPoints2IniImg;
	int num2 = 0;
	for (int m = 0; m < ipmSize.height; m++)
	{
		for (int n = 0; n < ipmSize.width; n++)
		{
			allIpmPoints.push_back(cvPoint(n,m));
			num2++;
		}
	}
	perspectiveTransform(allIpmPoints, allIpmPoints2IniImg, ipm2imgMatrix);
	num2 = 0;
	for (int m = 0; m < ipmSize.height; m++)
	{
		for (int n = 0; n < ipmSize.width; n++)
		{
			Point2f tmpPt = allIpmPoints2IniImg[num2];
			if (tmpPt.x < 5 || tmpPt.x > img_cols - 5 || tmpPt.y < 5 || tmpPt.y > img_rows - 5)
			{
				// outlier
				outlierFile<<n<<" "<<m<<endl;
			}
			num2++;
		}
	}
	outlierFile.close();

	delete [] srcPoint;
	delete [] dstPoint;

	return 0;
}
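
/* Usage sketch (assumption, not from the original source): the four points
 * are the source-image corners of the lane region, ordered to match
 * dstPoint above (bottom-left, bottom-right, top-right, top-left). The
 * coordinates and output directory below are made up for illustration. */
void runIpmExample(Img2IPM &ipm)
{
	Point pts[4] = { Point(200, 400), Point(440, 400),
	                 Point(360, 250), Point(280, 250) };
	// writes ipm2ini.txt, ini2ipm.txt and outlier.txt into the directory
	ipm.process(pts, "calib/", 640, 480);
}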
Esempio n. 18
0
static void putDistance(IplImage *Image,
                        std::vector<cv_tracker::image_rect_ranged> objects,
                        int threshold_height,
                        const char* objectLabel)
{
  char distance_string[32];
  CvFont dfont;
  float hscale	    = 0.7f;
  float vscale	    = 0.7f;
  float italicscale = 0.0f;
  int	thickness   = 1;

  CvFont      dfont_label;
  float       hscale_label = 0.5f;
  float       vscale_label = 0.5f;
  CvSize      text_size;
  int         baseline     = 0;

  cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
  cvGetTextSize(objectLabel,
                &dfont_label,
                &text_size,
                &baseline);

  for (unsigned int i=0; i<objects.size(); i++)
    {
      if (objects.at(i).rect.y > threshold_height) // temporal way to avoid drawing detections in the sky
        {
          if (!isNearlyNODATA(objects.at(i).range))
            {
              /* put label */
              CvPoint labelOrg = cvPoint(objects.at(i).rect.x - OBJ_RECT_THICKNESS,
                                         objects.at(i).rect.y - baseline - OBJ_RECT_THICKNESS);

              cvRectangle(Image,
                          cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                          cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                          CV_RGB(0, 0, 0), // label background is black
                          -1, 8, 0
                          );
              cvPutText(Image,
                        objectLabel,
                        labelOrg,
                        &dfont_label,
                        CV_RGB(255, 255, 255) // label text color is white
                        );

              /* put distance data */
              cvRectangle(Image,
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 45),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 5),
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) + (((int)log10(objects.at(i).range/100)+1) * 8 + 38),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 30),
                          cv::Scalar(255,255,255),
                          -1);

              cvInitFont (&dfont,
                          CV_FONT_HERSHEY_COMPLEX,
                          hscale,
                          vscale,
                          italicscale,
                          thickness,
                          CV_AA);

              sprintf(distance_string, "%.2f m", objects.at(i).range / 100); //unit of length is meter
              cvPutText(Image,
                        distance_string,
                        cvPoint(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 40),
                                objects.at(i).rect.y + objects.at(i).rect.height + 25),
                        &dfont,
                        CV_RGB(255, 0, 0));
            }
        }
    }
}
Esempio n. 19
0
/**
   Calculates the optical flow between two images.

   @param[in] imgA   First input image
   @param[in] imgB   Second input image, the flow is calculated from imgA to imgB
   @param[out] imgU   Horizontal flow field
   @param[out] imgV   Vertical flow field
   @param[in] saved_data   Flag indicates previous imgB is now imgA (ie subsequent frames), save some time in calculation
   
   @return   Flag to indicate successful completion

*/
int VarFlow::CalcFlow(IplImage* imgA, IplImage* imgB, IplImage* imgU, IplImage* imgV, bool saved_data = false){
    
    if(!initialized)
      return 0;
      
    IplImage* swap_img;
      
    //Don't recalculate imgAfloat, just swap with imgBfloat
    if(saved_data){
        
       CV_SWAP(imgAfloat, imgBfloat, swap_img);
       
       cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
       cvConvert(imgBsmall, imgBfloat);  // Calculate new imgBfloat
       cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
       
    }
    
    //Calculate imgAfloat as well as imgBfloat
    else{
    
        cvResize(imgA, imgAsmall, CV_INTER_LINEAR);
        cvResize(imgB, imgBsmall, CV_INTER_LINEAR);
    
        cvConvert(imgAsmall, imgAfloat);
        cvConvert(imgBsmall, imgBfloat);
    
        cvSmooth(imgAfloat, imgAfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        cvSmooth(imgBfloat, imgBfloat, CV_GAUSSIAN, 0, 0, sigma, 0 );
        
    }
    
    cvFilter2D(imgAfloat, imgAfx, &fx_mask, cvPoint(-1,-1));  // X spacial derivative
    cvFilter2D(imgAfloat, imgAfy, &fy_mask, cvPoint(-1,-1));  // Y spacial derivative
    
    cvSub(imgBfloat, imgAfloat, imgAft, NULL);  // Temporal derivative
    
    cvMul(imgAfx,imgAfx,imgAfxfx_array[0], 1);
    cvMul(imgAfx,imgAfy,imgAfxfy_array[0], 1);
    cvMul(imgAfx,imgAft,imgAfxft_array[0], 1);
    cvMul(imgAfy,imgAfy,imgAfyfy_array[0], 1);
    cvMul(imgAfy,imgAft,imgAfyft_array[0], 1);
    
    cvSmooth(imgAfxfx_array[0], imgAfxfx_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxfy_array[0], imgAfxfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfxft_array[0], imgAfxft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyfy_array[0], imgAfyfy_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    cvSmooth(imgAfyft_array[0], imgAfyft_array[0], CV_GAUSSIAN, 0, 0, rho, 0 );
    
    int i;
    
    //Fill all the levels of the multigrid algorithm with resized images
    for(i = 1; i < (max_level - start_level + 1); i++){
        
        cvResize(imgAfxfx_array[i-1], imgAfxfx_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxfy_array[i-1], imgAfxfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfxft_array[i-1], imgAfxft_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyfy_array[i-1], imgAfyfy_array[i], CV_INTER_LINEAR);
        cvResize(imgAfyft_array[i-1], imgAfyft_array[i], CV_INTER_LINEAR);
        
    }
    
    int k = (max_level - start_level);

    while(1)
    {
        gauss_seidel_recursive(k, (max_level - start_level), k, pow((float)2.0,(float)(k)), imgAfxft_array, imgAfyft_array);

        if(k > 0){
            // Transfer velocity from coarse to fine
            cvResize(imgU_array[k], imgU_array[k-1], CV_INTER_LINEAR);
            cvResize(imgV_array[k], imgV_array[k-1], CV_INTER_LINEAR);

            k--;
        }else{
            break;
        }
    }
    
    // Transfer to output image, resize if necessary
    cvResize(imgU_array[0], imgU, CV_INTER_LINEAR);
    cvResize(imgV_array[0], imgV, CV_INTER_LINEAR);
    
    // If started algorithm with smaller image than original, scale the flow field
    if(start_level > 0){
        cvScale(imgU, imgU, pow(2.0, start_level));
        cvScale(imgV, imgV, pow(2.0, start_level));
    }
    
    return 1;
}
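
/* Usage sketch (assumption, not from the original source): on consecutive
 * video frames only the first pair needs both inputs preprocessed; later
 * pairs pass saved_data = true so the previous imgB pyramid is reused as
 * the new imgA. Construction and initialization of VarFlow are assumed to
 * happen elsewhere. */
void flowOverSequence(VarFlow &flow, IplImage **frames, int n,
                      IplImage *imgU, IplImage *imgV)
{
    for (int i = 1; i < n; i++) {
        // reuse the saved pyramid for every pair after the first
        flow.CalcFlow(frames[i-1], frames[i], imgU, imgV, i > 1);
    }
}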
Esempio n. 20
0
void MBLBPDetectSingleScale( const IplImage* img,
                             MBLBPCascade * pCascade,
                             CvSeq * positions, 
                             CvSize winStride)
{
    IplImage * sum = 0;
    int ystep, xstep, ymax, xmax;
    
    CV_FUNCNAME( "MBLBPDetectSingleScale" );

    __BEGIN__;


    if( !img )
        CV_ERROR( CV_StsNullPtr, "Null image pointer" );

    if( ! pCascade) 
        CV_ERROR( CV_StsNullPtr, "Invalid classifier cascade" );

    if( !positions )
        CV_ERROR( CV_StsNullPtr, "Null CvSeq pointer" );

    if(pCascade->win_width > img->width || 
       pCascade->win_height > img->height)
        return ;



    CV_CALL( sum = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_32S, 1));
    myIntegral(img, sum);
    //cvIntegral(img, sum);
    UpdateCascade(pCascade, sum);

    ystep = winStride.height;
    xstep = winStride.width;
    ymax = img->height - pCascade->win_height - 1;
    xmax = img->width  - pCascade->win_width  - 1;

#ifdef _OPENMP
    #pragma omp parallel for
#endif

	for(int iy = 0; iy < ymax; iy+=ystep)
    {
       for(int ix = 0; ix < xmax; ix+=xstep)
        {
            int w_offset = iy * sum->widthStep / sizeof(int) + ix;
			int result = DetectAt(pCascade, w_offset);
            if( result > 0)
            {
                //since the integral image is different with that of OpenCV,
                //update the position to OpenCV's by adding 1.
                CvPoint pt = cvPoint(ix+1, iy+1);
#ifdef _OPENMP
                omp_set_lock(&lock);
#endif
                cvSeqPush(positions, &pt);
#ifdef _OPENMP
                omp_unset_lock(&lock);
#endif
			}
			if(result == 0)
			{
				ix += xstep;
			}
        }
    }

    __END__;

    cvReleaseImage(&sum);
    return ;
}
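
/* Usage sketch (assumption, not from the original source): positions is a
 * CvSeq of CvPoint backed by caller-owned storage; how the MB-LBP cascade
 * is loaded is not shown here. */
void detectExample(const IplImage *gray, MBLBPCascade *pCascade)
{
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *positions = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

    MBLBPDetectSingleScale(gray, pCascade, positions, cvSize(2, 2));
    printf("detections at this scale: %d\n", positions->total);

    cvReleaseMemStorage(&storage);
}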
Esempio n. 21
0
std::list<Garbage*>
GarbageRecognition::garbageList(IplImage * src, IplImage * model) {



    std::list<Garbage*> garbageList;

    //histogram of the object model, used by the histogram-based filter;
    //the model image could be a parameter

    utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
    CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

    //use the source image size for all the working images
    CvSize srcSize = cvGetSize(src);

    //images for HSV conversion
    IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
    IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );


    //Image for thresholding
    IplImage * andImage=cvCreateImage(srcSize,8,1);

    IplImage * andSimage=cvCreateImage(srcSize,8,1);
    IplImage * andSThreshImage=cvCreateImage(srcSize,8,1);
    IplImage * andSequalizedImage=cvCreateImage(srcSize,8,1);
    IplImage * andSIImage=cvCreateImage(srcSize,8,1);

    //Image for thresholding
    IplImage * threshImage=cvCreateImage(srcSize,8,1);

    //image for equalization
    IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

    //image for Morphing operations(Dilate-erode)
    IplImage * morphImage=cvCreateImage(srcSize,8,1);

    //image for image smoothing
    IplImage * smoothImage=cvCreateImage(srcSize,8,1);

    //image for contour-finding operations
    IplImage * contourImage=cvCreateImage(srcSize,8,3);


    int frameCounter=1;
    int cont_index=0;

    //convolution kernel for morph operations
    IplConvKernel* element;

    CvRect boundingRect;

    //contours
    CvSeq * contours;


    //convert image to hsv
    cvCvtColor( src, hsv, CV_BGR2HSV );
    cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );


    /*I(x,y)blue ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3]
    I(x,y)green ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+1]
    I(x,y)red ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+2]*/

    //threshold the hue plane (use the real frame size, not a hardcoded 640x480)
    for(int x=0; x<srcSize.width; x++) {
        for(int y=0; y<srcSize.height; y++) {
            uchar * hue=&((uchar*) (h_plane->imageData+h_plane->widthStep*y))[x];
            uchar * sat=&((uchar*) (s_plane->imageData+s_plane->widthStep*y))[x];
            if(*hue>20 && *hue<40 && *sat>60)
                *hue=255;
            else
                *hue=0;
        }
    }
    cvAnd(h_plane, v_plane, andImage);
    cvAnd(h_plane, s_plane, andSimage);


    //apply morphologic operations
    element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
                                            MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
                                            CV_SHAPE_RECT, NULL);


    cvDilate(andImage,morphImage,element,MORPH_DILATE_ITER);
    cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);


    cvThreshold(morphImage,threshImage,120,255,CV_THRESH_BINARY);

    //get all contours
    contours=myFindContours(threshImage);


    cont_index=0;

    cvCopy(src,contourImage,0);



    while(contours!=NULL) {

        CvSeq * aContour=getPolygon(contours);
        utils::Contours * ct = new Contours(aContour);




        //apply filters


        if( ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER) &&
                ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA) &&
                //ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) &&
                ct->boxAreaFilter(BOXFILTER_TOLERANCE) &&
                //ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN)&&
                1) {



            //get contour bounding box
            boundingRect=cvBoundingRect(ct->getContour(),0);
            cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
                        cvPoint(boundingRect.x+boundingRect.width,
                                boundingRect.y+boundingRect.height),
                        _GREEN,1,8,0);


            //if passed filters
            ct->printContour(3,cvScalar(127,127,0,0),
                             contourImage);

            std::vector<int> centroid(2);
            centroid=ct->getCentroid();


            //build garbage List
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
                    boundingRect.y,boundingRect.width,boundingRect.height);

            utils::Garbage * aGarbage = new utils::Garbage(r,centroid);

            garbageList.push_back(aGarbage);


        }

        delete ct;
        cvReleaseMemStorage( &aContour->storage );
        contours=contours->h_next;
        cont_index++;
    }

    cvShowImage("drawContours",contourImage);
    // cvWaitKey(0);
    delete h;


    cvReleaseHist(&testImageHistogram);
    cvReleaseImage(&threshImage);
    cvReleaseImage(&equalizedImage);
    cvReleaseImage(&morphImage);
    cvReleaseImage(&smoothImage);
    cvReleaseImage(&contourImage);

    cvReleaseImage(&hsv);
    cvReleaseImage(&h_plane);
    cvReleaseImage(&s_plane);
    cvReleaseImage(&v_plane);
    cvReleaseImage(&andImage);
    cvReleaseImage(&andSimage);
    cvReleaseImage(&andSThreshImage);
    cvReleaseImage(&andSequalizedImage);
    cvReleaseImage(&andSIImage); // was leaked in the original
    cvReleaseStructuringElement(&element);



    return garbageList;
}
Esempio n. 22
0
static void computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){
	if(DEBUG){
		std::cout << "-- VECTOR COMPUTING" << std::endl;		
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size 640x480
	int i, idx1 = last, idx2;
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	//--SURF CORNERS--
	if(DEBUG){
		std::cout << "--- SURF CORNERS" << std::endl;
	}
        color = CV_RGB(0,255,0);
        CvMemStorage* storage2 = cvCreateMemStorage(0);
        CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1);
        CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
        cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params );
        if(DEBUG){
			printf("Image Descriptors: %d\n", imageDescriptors->total);
		}
        for( int j = 0; j < imageKeypoints->total; j++ ){
            CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j );
			center.x = cvRound(r->pt.x);
            center.y = cvRound(r->pt.y);
			if(DEBUG){
				printf("j: %d \t", j);               
				printf("total: %d \t", imageKeypoints->total);               
				printf("valor hessiano: %f \t", r->hessian);
				printf("x: %d \t", center.x);
				printf("y: %d \n", center.y);
			}
			// Add the point located in the region of interest
			cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 );
			// Fill the matrix with the vectors
			relevancePointToVector(center.x, center.y, wROI, hROI, 5);
		}
	//--SURF CORNERS


	// calculate motion gradient orientation and valid orientation mask
	cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
	
	// Compute Motion on 4x4 Quadrants
	if(DEBUG){
		std::cout << "--- MOTION QUADRANTS" << std::endl;
	}
	i = 25;
	color = CV_RGB(255,0,0);
	magnitude = 30;
	for (int r = 0; r < size.height; r += hROI){
		for (int c = 0; c < size.width; c += wROI){
			comp_rect.x = c;
			comp_rect.y = r;
			comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI;
			comp_rect.height = (r + hROI > size.height) ? (size.height - r) : hROI;

			cvSetImageROI( mhi, comp_rect );
			cvSetImageROI( orient, comp_rect );
			cvSetImageROI( mask, comp_rect );
			cvSetImageROI( silh, comp_rect );
			cvSetImageROI( img, comp_rect );

			// Process Motion
			angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
			angle = 360.0 - angle;  // adjust for images with top-left origin
			count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
			roi = cvGetImageROI(mhi);
			center = cvPoint( (comp_rect.x + comp_rect.width/2),
					  (comp_rect.y + comp_rect.height/2) );
			cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
			cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
			cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );	
			
			if(DEBUG){
				std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi
			}
			cvResetImageROI( mhi );
			cvResetImageROI( orient );
			cvResetImageROI( mask );
			cvResetImageROI( silh );
			cvResetImageROI(img);
			relevanceDirectionToVector(i, angle);
			++i;
		}
	}

	// Compute Global Motion
	if(DEBUG){
		std::cout << "--- MOTION GLOBAL" << std::endl;
	}
	comp_rect = cvRect( 0, 0, size.width, size.height );
	color = CV_RGB(255,255,255);
	magnitude = 100;
	angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
	angle = 360.0 - angle;  // adjust for images with top-left origin
	count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
	roi = cvGetImageROI(mhi);
	center = cvPoint( (comp_rect.x + comp_rect.width/2),
			  (comp_rect.y + comp_rect.height/2) );
	cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
	cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
	cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
	if(DEBUG){
		std::cout << "Motion Main-> x: " << roi.x << " y: " << roi.y << " count: " << count << std::endl; // print the roi
	}
	relevanceDirectionToVector(50, angle);
}
Esempio n. 23
0
int RectDetector::FindShape(const IplImage* img,std::vector<RectShape>& shapes)
{
	if( !img )
	{
        return -1;
	}

	int thresh = 50;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours;
	int i, c, l, N = 11;
	CvSize sz = cvSize( img->width & -2, img->height & -2 ); 
 
	IplImage* timg = cvCloneImage( img );
	IplImage* gray = cvCreateImage( sz, 8, 1 );
	IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
	IplImage* tgray;
	CvSeq* result;
	double s, t;
	// create an empty sequence to store the contour corner points
	//CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

	cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
	// noise filtering via pyramid down/up-sampling (disabled here)
	//cvPyrDown( timg, pyr, 5 );
	//cvPyrUp( pyr, timg, 5 );
	tgray = cvCreateImage( sz, 8, 1 );
	//cvShowImage("test",timg);
 

	// extract the c-th color plane
	cvSetImageCOI( timg, 0 );
	cvCopy( timg, tgray, 0 );

	// try extraction at several threshold levels (N=11)
	for( l = 0; l < N; l++ )
	{
		// apply Canny. Take the upper threshold from slider
		// Canny helps to catch squares with gradient shading  
		if( l == 0 )
		{
			cvCanny( tgray, gray, 0, thresh, 5 );
			//dilate the image with the default structuring element
			cvDilate( gray, gray, 0, 2 );
			//cvShowImage("test",gray);
		}
		else
		{
			// apply threshold if l!=0:
			cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
 
		}

		// find all contours and store them in the sequence
		cvFindContours( gray, storage, &contours, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

		// iterate over each contour found
		while( contours )
		{
			//approximate the polygonal curve with the specified accuracy
			result = cvApproxPoly( contours, sizeof(CvContour), storage,
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                  

			if( result->total == 4 &&
				fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 500 &&
				fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 100000 &&
				cvCheckContourConvexity(result) )
			{
				s = 0;

				for( i = 0; i < 5; i++ )
				{
					// find minimum angle between joint edges (maximum of cosine)
					if( i >= 2 )
					{
						t = fabs(this->CalcAngle((CvPoint*)cvGetSeqElem( result, i ),
										(CvPoint*)cvGetSeqElem( result, i-2 ),
										(CvPoint*)cvGetSeqElem( result, i-1 )));
						s = s > t ? s : t;
					}
				}

				// if the maximum cosine is small enough the corners can be
				// treated as right angles (cos 0.1 is roughly 84 degrees)
				if( s < 0.1 )
				{
					RectShape rect(*(CvPoint*)cvGetSeqElem( result, 0 ),
					               *(CvPoint*)cvGetSeqElem( result, 1 ),
					               *(CvPoint*)cvGetSeqElem( result, 2 ),
					               *(CvPoint*)cvGetSeqElem( result, 3 ));

					bool isExist = false;
					//deduplicate
					for(size_t j = 0 ; j < shapes.size(); ++j)
					{
						if(rect == shapes[j] )
						{
							isExist = true;
							continue;
						}
						//remove the rectangle which contains others
						if(rect.isNear(shapes[j]))
						{
							if(rect.ctPoint.x > shapes[j].ctPoint.x || rect.ctPoint.y > shapes[j].ctPoint.y)
							{
								isExist = true;
								continue;
							}
							else
							{
								shapes[j] = rect;
							}
						}
					}

					if(!isExist)
					{
						shapes.push_back(rect);
					}
				}
			}

			// continue with the next contour
			contours = contours->h_next;
		

		}
	}

	cvReleaseImage( &gray );
	cvReleaseImage( &pyr );
	cvReleaseImage( &tgray );
	cvReleaseImage( &timg );
	cvReleaseMemStorage( &storage ); // release, not just clear, to avoid a leak
	


	return shapes.size();
}
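
/* Usage sketch (assumption, not from the original source): RectDetector and
 * RectShape are project types; the image path is made up. */
void findRectsExample()
{
	IplImage *img = cvLoadImage("scene.png", CV_LOAD_IMAGE_COLOR);
	if (!img)
		return;

	RectDetector detector;
	std::vector<RectShape> shapes;
	int found = detector.FindShape(img, shapes);
	printf("rectangles found: %d\n", found);

	cvReleaseImage(&img);
}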
Esempio n. 24
0
CvSeq *
cvFindNextContour( CvContourScanner scanner )
{
    char *img0;
    char *img;
    int step;
    int width, height;
    int x, y;
    int prev;
    CvPoint lnbd;
    CvSeq *contour = 0;
    int nbd;
    int mode;
    CvStatus result = (CvStatus) 1;

    CV_FUNCNAME( "cvFindNextContour" );

    __BEGIN__;

    if( !scanner )
        CV_ERROR( CV_StsNullPtr, "" );
    icvEndProcessContour( scanner );

    /* initialize local state */
    img0 = scanner->img0;
    img = scanner->img;
    step = scanner->img_step;
    x = scanner->pt.x;
    y = scanner->pt.y;
    width = scanner->img_size.width;
    height = scanner->img_size.height;
    mode = scanner->mode;
    lnbd = scanner->lnbd;
    nbd = scanner->nbd;

    prev = img[x - 1];

    for( ; y < height; y++, img += step )
    {
        for( ; x < width; x++ )
        {
            int p = img[x];

            if( p != prev )
            {
                _CvContourInfo *par_info = 0;
                _CvContourInfo *l_cinfo = 0;
                CvSeq *seq = 0;
                int is_hole = 0;
                CvPoint origin;

                if( !(prev == 0 && p == 1) )    /* if not external contour */
                {
                    /* check hole */
                    if( p != 0 || prev < 1 )
                        goto resume_scan;

                    if( prev & -2 )
                    {
                        lnbd.x = x - 1;
                    }
                    is_hole = 1;
                }

                if( mode == 0 && (is_hole || img0[lnbd.y * step + lnbd.x] > 0) )
                    goto resume_scan;

                origin.y = y;
                origin.x = x - is_hole;

                /* find contour parent */
                if( mode <= 1 || (!is_hole && mode == 2) || lnbd.x <= 0 )
                {
                    par_info = &(scanner->frame_info);
                }
                else
                {
                    int lval = img0[lnbd.y * step + lnbd.x] & 0x7f;
                    _CvContourInfo *cur = scanner->cinfo_table[lval - 2];

                    assert( lval >= 2 );

                    /* find the first bounding contour */
                    while( cur )
                    {
                        if( (unsigned) (lnbd.x - cur->rect.x) < (unsigned) cur->rect.width &&
                            (unsigned) (lnbd.y - cur->rect.y) < (unsigned) cur->rect.height )
                        {
                            if( par_info )
                            {
                                if( icvTraceContour( scanner->img0 +
                                                     par_info->origin.y * step +
                                                     par_info->origin.x, step, img + lnbd.x,
                                                     par_info->is_hole ) > 0 )
                                    break;
                            }
                            par_info = cur;
                        }
                        cur = cur->next;
                    }

                    assert( par_info != 0 );

                    /* if current contour is a hole and previous contour is a hole or
                       current contour is external and previous contour is external then
                       the parent of the contour is the parent of the previous contour else
                       the parent is the previous contour itself. */
                    if( par_info->is_hole == is_hole )
                    {
                        par_info = par_info->parent;
                        /* every contour must have a parent
                           (at least, the frame of the image) */
                        if( !par_info )
                            par_info = &(scanner->frame_info);
                    }

                    /* hole flag of the parent must differ from the flag of the contour */
                    assert( par_info->is_hole != is_hole );
                    if( par_info->contour == 0 )        /* removed contour */
                        goto resume_scan;
                }

                lnbd.x = x - is_hole;

                cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos) );

                seq = cvCreateSeq( scanner->seq_type1, scanner->header_size1,
                                   scanner->elem_size1, scanner->storage1 );
                if( !seq )
                {
                    result = CV_OUTOFMEM_ERR;
                    goto exit_func;
                }
                seq->flags |= is_hole ? CV_SEQ_FLAG_HOLE : 0;

                /* initialize header */
                if( mode <= 1 )
                {
                    l_cinfo = &(scanner->cinfo_temp);
                    result = icvFetchContour( img + x - is_hole, step,
                                              cvPoint( origin.x + scanner->offset.x,
                                                       origin.y + scanner->offset.y),
                                              seq, scanner->approx_method1 );
                    if( result < 0 )
                        goto exit_func;
                }
                else
                {
                    union { _CvContourInfo* ci; CvSetElem* se; } v;
                    v.ci = l_cinfo;
                    cvSetAdd( scanner->cinfo_set, 0, &v.se );
                    l_cinfo = v.ci;

                    result = icvFetchContourEx( img + x - is_hole, step,
                                                cvPoint( origin.x + scanner->offset.x,
                                                         origin.y + scanner->offset.y),
                                                seq, scanner->approx_method1,
                                                nbd, &(l_cinfo->rect) );
                    if( result < 0 )
                        goto exit_func;
                    l_cinfo->rect.x -= scanner->offset.x;
                    l_cinfo->rect.y -= scanner->offset.y;

                    l_cinfo->next = scanner->cinfo_table[nbd - 2];
                    scanner->cinfo_table[nbd - 2] = l_cinfo;

                    /* change nbd */
                    nbd = (nbd + 1) & 127;
                    nbd += nbd == 0 ? 3 : 0;
                }

                l_cinfo->is_hole = is_hole;
                l_cinfo->contour = seq;
                l_cinfo->origin = origin;
                l_cinfo->parent = par_info;

                if( scanner->approx_method1 != scanner->approx_method2 )
                {
                    result = icvApproximateChainTC89( (CvChain *) seq,
                                                      scanner->header_size2,
                                                      scanner->storage2,
                                                      &(l_cinfo->contour),
                                                      scanner->approx_method2 );
                    if( result < 0 )
                        goto exit_func;
                    cvClearMemStorage( scanner->storage1 );
                }

                l_cinfo->contour->v_prev = l_cinfo->parent->contour;

                if( par_info->contour == 0 )
                {
                    l_cinfo->contour = 0;
                    if( scanner->storage1 == scanner->storage2 )
                    {
                        cvRestoreMemStoragePos( scanner->storage1, &(scanner->backup_pos) );
                    }
                    else
                    {
                        cvClearMemStorage( scanner->storage1 );
                    }
                    p = img[x];
                    goto resume_scan;
                }

                cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos2) );
                scanner->l_cinfo = l_cinfo;
                scanner->pt.x = x + 1;
                scanner->pt.y = y;
                scanner->lnbd = lnbd;
                scanner->img = (char *) img;
                scanner->nbd = nbd;
                contour = l_cinfo->contour;

                result = CV_OK;
                goto exit_func;
              resume_scan:
                prev = p;
                /* update lnbd */
                if( prev & -2 )
                {
                    lnbd.x = x;
                }
            }                   /* end of prev != p */
        }                       /* end of loop on x */

        lnbd.x = 0;
        lnbd.y = y + 1;
        x = 1;
        prev = 0;

    }                           /* end of loop on y */

  exit_func:

    if( result != 0 )
        contour = 0;
    if( result < 0 )
        CV_ERROR_FROM_STATUS( result );

    __END__;

    return contour;
}
Esempio n. 25
0
int MeanShift(const IplImage* img, int **labels)
{
	DECLARE_TIMING(timer);
	START_TIMING(timer);

	//int level = 1;
	double color_radius2=color_radius*color_radius;
	int minRegion = 50;

	// use Lab rather than L*u*v!
	// since Luv may produce noise points
	IplImage *result = cvCreateImage(cvGetSize(img),img->depth,img->nChannels);
	cvCvtColor(img, result, CV_RGB2Lab);

	// Step One. Filtering stage of meanshift segmentation
	// http://rsbweb.nih.gov/ij/plugins/download/Mean_Shift.java
	for(int i=0;i<img->height;i++) {
		for(int j=0;j<img->width;j++)
		{
			int ic = i;
			int jc = j;
			int icOld, jcOld;
			float LOld, UOld, VOld;
			float L = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 0];
			float U = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 1];
			float V = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 2];
			// in the case of 8-bit and 16-bit images R, G and B are converted to floating-point format and scaled to fit 0 to 1 range
			// http://opencv.willowgarage.com/documentation/c/miscellaneous_image_transformations.html
			L = L*100/255;
			U = U-128;
			V = V-128;
			double shift = 5;
			for (int iters=0;shift > 3 && iters < 100;iters++) 
			{
				icOld = ic;
				jcOld = jc;
				LOld = L;
				UOld = U;
				VOld = V;

				float mi = 0;
				float mj = 0;
				float mL = 0;
				float mU = 0;
				float mV = 0;
				int num=0;

				int i2from = cv::max(0,i-spatial_radius), i2to = cv::min(img->height, i+spatial_radius+1);
				int j2from = cv::max(0,j-spatial_radius), j2to = cv::min(img->width, j+spatial_radius+1);
				for (int i2=i2from; i2 < i2to;i2++) {
					for (int j2=j2from; j2 < j2to; j2++) {
						float L2 = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 0],
							U2 = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 1],
							V2 = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 2];
						L2 = L2*100/255;
						U2 = U2-128;
						V2 = V2-128;

						double dL = L2 - L;
						double dU = U2 - U;
						double dV = V2 - V;
						if (dL*dL+dU*dU+dV*dV <= color_radius2) {
							mi += i2;
							mj += j2;
							mL += L2;
							mU += U2;
							mV += V2;
							num++;
						}
					}
				}
				float num_ = 1.f/num;
				L = mL*num_;
				U = mU*num_;
				V = mV*num_;
				ic = (int) (mi*num_+0.5);
				jc = (int) (mj*num_+0.5);
				int di = ic-icOld;
				int dj = jc-jcOld;
				double dL = L-LOld;
				double dU = U-UOld;
				double dV = V-VOld;

				shift = di*di+dj*dj+dL*dL+dU*dU+dV*dV; 
			}

			L = L*255/100;
			U = U+128;
			V = V+128;
			((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 0] = (uchar)L;
			((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 1] = (uchar)U;
			((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 2] = (uchar)V;
		}
	}

		IplImage *tobeshow = cvCreateImage(cvGetSize(img),img->depth,img->nChannels);
		cvCvtColor(result, tobeshow, CV_Lab2RGB);
		cvSaveImage("filtered.png", tobeshow);
		cvReleaseImage(&tobeshow);

		// Step Two. Cluster
		// Connect
		int regionCount = 0;
		int *modePointCounts = new int[img->height*img->width];
		memset(modePointCounts, 0, img->width*img->height*sizeof(int));
		float *mode = new float[img->height*img->width*3];
		{
			int label = -1;
			for(int i=0;i<img->height;i++) 
				for(int j=0;j<img->width;j++)
					labels[i][j] = -1;
			for(int i=0;i<img->height;i++) 
				for(int j=0;j<img->width;j++) {
					if(labels[i][j]<0)
					{
						labels[i][j] = ++label;
						float L = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 0],
							U = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 1],
							V = (float)((uchar *)(result->imageData + i*img->widthStep))[j*result->nChannels + 2];
						mode[label*3+0] = L*100/255;
						mode[label*3+1] = 354*U/255-134;
						mode[label*3+2] = 256*V/255-140;
						// Fill
						std::stack<CvPoint> neighStack;
						neighStack.push(cvPoint(i,j));
						const int dxdy[][2] = {{-1,-1},{-1,0},{-1,1},{0,-1},{0,1},{1,-1},{1,0},{1,1}};
						while(!neighStack.empty())
						{
							CvPoint p = neighStack.top();
							neighStack.pop();
							for(int k=0;k<8;k++)
							{
								int i2 = p.x+dxdy[k][0], j2 = p.y+dxdy[k][1];
								if(i2>=0 && j2>=0 && i2<img->height && j2<img->width && labels[i2][j2]<0 && color_distance(result, i,j,i2,j2)<color_radius2)
								{
									labels[i2][j2] = label;
									neighStack.push(cvPoint(i2,j2));
									modePointCounts[label]++;
									L = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 0];
									U = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 1];
									V = (float)((uchar *)(result->imageData + i2*img->widthStep))[j2*result->nChannels + 2];
									mode[label*3+0] += L*100/255;
									mode[label*3+1] += 354*U/255-134;
									mode[label*3+2] += 256*V/255-140;
								}
							}
						}
						mode[label*3+0] /= modePointCounts[label];
						mode[label*3+1] /= modePointCounts[label];
						mode[label*3+2] /= modePointCounts[label];
					}
				}
			//current region count
			regionCount = label+1;
		}
		std::cout<<"Mean Shift(Connect):"<<regionCount<<std::endl;
		int oldRegionCount = regionCount;

		// TransitiveClosure
		for(int counter = 0, deltaRegionCount = 1; counter<5 && deltaRegionCount>0; counter++)
		{
			// 1.Build RAM using classifiction structure
			RAList *raList = new RAList [regionCount], *raPool = new RAList [10*regionCount];	//10 is hard coded!
			for(int i = 0; i < regionCount; i++)
			{
				raList[i].label = i;
				raList[i].next = NULL;
			}
			for(int i = 0; i < regionCount*10-1; i++)
			{
				raPool[i].next = &raPool[i+1];
			}
			raPool[10*regionCount-1].next = NULL;
			RAList	*raNode1, *raNode2, *oldRAFreeList, *freeRAList = raPool;
			for(int i=0;i<img->height;i++) {
				for(int j=0;j<img->width;j++)
				{
					if(i>0 && labels[i][j]!=labels[i-1][j])
					{
						// Get 2 free node
						raNode1			= freeRAList;
						raNode2			= freeRAList->next;
						oldRAFreeList	= freeRAList;
						freeRAList		= freeRAList->next->next;
						// connect the two region
						raNode1->label	= labels[i][j];
						raNode2->label	= labels[i-1][j];
						if(raList[labels[i][j]].Insert(raNode2))	//already exists!
							freeRAList = oldRAFreeList;
						else
							raList[labels[i-1][j]].Insert(raNode1);
					}
					if(j>0 && labels[i][j]!=labels[i][j-1])
					{
						// Get 2 free node
						raNode1			= freeRAList;
						raNode2			= freeRAList->next;
						oldRAFreeList	= freeRAList;
						freeRAList		= freeRAList->next->next;
						// connect the two region
						raNode1->label	= labels[i][j];
						raNode2->label	= labels[i][j-1];
						if(raList[labels[i][j]].Insert(raNode2))
							freeRAList = oldRAFreeList;
						else
							raList[labels[i][j-1]].Insert(raNode1);
					}
				}
			}

				// 2.Treat each region Ri as a disjoint set
				for(int i = 0; i < regionCount; i++)
				{
					RAList	*neighbor = raList[i].next;
					while(neighbor)
					{
						if(color_distance(&mode[3*i], &mode[3*neighbor->label])<color_radius2)
						{
							int iCanEl = i, neighCanEl	= neighbor->label;
							while(raList[iCanEl].label != iCanEl) iCanEl = raList[iCanEl].label;
							while(raList[neighCanEl].label != neighCanEl) neighCanEl = raList[neighCanEl].label;
							if(iCanEl<neighCanEl)
								raList[neighCanEl].label = iCanEl;
							else
							{
								//raList[raList[iCanEl].label].label = iCanEl;
								raList[iCanEl].label = neighCanEl;
							}
						}
						neighbor = neighbor->next;
					}
				}
				// 3. Union Find
				for(int i = 0; i < regionCount; i++)
				{
					int iCanEl	= i;
					while(raList[iCanEl].label != iCanEl) iCanEl	= raList[iCanEl].label;
					raList[i].label	= iCanEl;
				}
				// 4. Traverse joint sets, relabeling image.
				int *modePointCounts_buffer = new int[regionCount];
				memset(modePointCounts_buffer, 0, regionCount*sizeof(int));
				float *mode_buffer = new float[regionCount*3];
				int	*label_buffer = new int[regionCount];

				for(int i=0;i<regionCount; i++)
				{
					label_buffer[i]	= -1;
					mode_buffer[i*3+0] = 0;
					mode_buffer[i*3+1] = 0;
					mode_buffer[i*3+2] = 0;
				}
				for(int i=0;i<regionCount; i++)
				{
					int iCanEl	= raList[i].label;
					modePointCounts_buffer[iCanEl] += modePointCounts[i];
					for(int k=0;k<3;k++)
						mode_buffer[iCanEl*3+k] += mode[i*3+k]*modePointCounts[i];
				}
				int	label = -1;
				for(int i = 0; i < regionCount; i++)
				{
					int iCanEl	= raList[i].label;
					if(label_buffer[iCanEl] < 0)
					{
						label_buffer[iCanEl]	= ++label;

						for(int k = 0; k < 3; k++)
							mode[label*3+k]	= (mode_buffer[iCanEl*3+k])/(modePointCounts_buffer[iCanEl]);

						modePointCounts[label]	= modePointCounts_buffer[iCanEl];
					}
				}
				regionCount = label+1;
				for(int i = 0; i < img->height; i++)
					for(int j = 0; j < img->width; j++)
						labels[i][j]	= label_buffer[raList[labels[i][j]].label];

				delete [] mode_buffer;
				delete [] modePointCounts_buffer;
				delete [] label_buffer;

				//Destroy RAM
				delete[] raList;
				delete[] raPool;

				deltaRegionCount = oldRegionCount - regionCount;
				oldRegionCount = regionCount;
				std::cout<<"Mean Shift(TransitiveClosure):"<<regionCount<<std::endl;
		}

		// Prune
		{
			int *modePointCounts_buffer = new int[regionCount];
			float *mode_buffer = new float[regionCount*3];
			int	*label_buffer = new int [regionCount];
			int minRegionCount;

			do{
				minRegionCount = 0;
				// Build RAM again
				RAList *raList = new RAList [regionCount], *raPool = new RAList [10*regionCount];	//10 is hard coded!
				for(int i = 0; i < regionCount; i++)
				{
					raList[i].label = i;
					raList[i].next = NULL;
				}
				for(int i = 0; i < regionCount*10-1; i++)
				{
					raPool[i].next = &raPool[i+1];
				}
				raPool[10*regionCount-1].next = NULL;
				RAList	*raNode1, *raNode2, *oldRAFreeList, *freeRAList = raPool;
				for(int i=0;i<img->height;i++) {
					for(int j=0;j<img->width;j++)
					{
						if(i>0 && labels[i][j]!=labels[i-1][j])
						{
							// Get 2 free node
							raNode1			= freeRAList;
							raNode2			= freeRAList->next;
							oldRAFreeList	= freeRAList;
							freeRAList		= freeRAList->next->next;
							// connect the two region
							raNode1->label	= labels[i][j];
							raNode2->label	= labels[i-1][j];
							if(raList[labels[i][j]].Insert(raNode2))	//already exists!
								freeRAList = oldRAFreeList;
							else
								raList[labels[i-1][j]].Insert(raNode1);
						}
						if(j>0 && labels[i][j]!=labels[i][j-1])
						{
							// Get 2 free node
							raNode1			= freeRAList;
							raNode2			= freeRAList->next;
							oldRAFreeList	= freeRAList;
							freeRAList		= freeRAList->next->next;
							// connect the two region
							raNode1->label	= labels[i][j];
							raNode2->label	= labels[i][j-1];
							if(raList[labels[i][j]].Insert(raNode2))
								freeRAList = oldRAFreeList;
							else
								raList[labels[i][j-1]].Insert(raNode1);
						}
					}
				}
					// Find small regions
					for(int i = 0; i < regionCount; i++) {
						if(modePointCounts[i] < minRegion)
						{
							minRegionCount++;
							RAList *neighbor = raList[i].next;
							int candidate = neighbor->label;
							float minDistance = color_distance(&mode[3*i], &mode[3*candidate]);
							neighbor = neighbor->next;
							while(neighbor)
							{
								float minDistance2 = color_distance(&mode[3*i], &mode[3*neighbor->label]);
								if(minDistance2<minDistance)
								{
									minDistance = minDistance2;
									candidate = neighbor->label;
								}
								neighbor = neighbor->next;
							}
							int iCanEl = i, neighCanEl	= candidate;
							while(raList[iCanEl].label != iCanEl) iCanEl = raList[iCanEl].label;
							while(raList[neighCanEl].label != neighCanEl) neighCanEl = raList[neighCanEl].label;
							if(iCanEl < neighCanEl)
								raList[neighCanEl].label	= iCanEl;
							else
							{
								//raList[raList[iCanEl].label].label	= neighCanEl;
								raList[iCanEl].label = neighCanEl;
							}
						}
					}
						for(int i = 0; i < regionCount; i++)
						{
							int iCanEl	= i;
							while(raList[iCanEl].label != iCanEl)
								iCanEl	= raList[iCanEl].label;
							raList[i].label	= iCanEl;
						}
						memset(modePointCounts_buffer, 0, regionCount*sizeof(int));
						for(int i = 0; i < regionCount; i++)
						{
							label_buffer[i]	= -1;
							mode_buffer[3*i+0]	= 0;
							mode_buffer[3*i+1]	= 0;
							mode_buffer[3*i+2]	= 0;
						}
						for(int i=0;i<regionCount; i++)
						{
							int iCanEl	= raList[i].label;
							modePointCounts_buffer[iCanEl] += modePointCounts[i];
							for(int k=0;k<3;k++)
								mode_buffer[iCanEl*3+k] += mode[i*3+k]*modePointCounts[i];
						}
						int	label = -1;
						for(int i = 0; i < regionCount; i++)
						{
							int iCanEl	= raList[i].label;
							if(label_buffer[iCanEl] < 0)
							{
								label_buffer[iCanEl]	= ++label;

								for(int k = 0; k < 3; k++)
									mode[label*3+k]	= (mode_buffer[iCanEl*3+k])/(modePointCounts_buffer[iCanEl]);

								modePointCounts[label]	= modePointCounts_buffer[iCanEl];
							}
						}
						regionCount = label+1;
						for(int i = 0; i < img->height; i++)
							for(int j = 0; j < img->width; j++)
								labels[i][j]	= label_buffer[raList[labels[i][j]].label];

						//Destroy RAM
						delete[] raList;
						delete[] raPool;
						std::cout<<"Mean Shift(Prune):"<<regionCount<<std::endl;
			}while(minRegionCount > 0);

			delete [] mode_buffer;
			delete [] modePointCounts_buffer;
			delete [] label_buffer;
		}

		// Output
		STOP_TIMING(timer);
		std::cout<<"Mean Shift(ms):"<<GET_TIMING(timer)<<std::endl;

		cvReleaseImage(&result);
		delete []mode;
		delete []modePointCounts;
		return regionCount;
}
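
/* Usage sketch (assumption, not from the original source): MeanShift expects
 * a caller-allocated height x width label matrix; spatial_radius and
 * color_radius are globals configured elsewhere in the original code. */
void segmentExample(const IplImage *img)
{
	int **labels = new int *[img->height];
	for (int i = 0; i < img->height; i++)
		labels[i] = new int[img->width];

	int regions = MeanShift(img, labels);
	std::cout << "segmented into " << regions << " regions" << std::endl;

	for (int i = 0; i < img->height; i++)
		delete [] labels[i];
	delete [] labels;
}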
Esempio n. 26
0
bool findObjectSURF( cv::Mat objectMat, cv::Mat sceneMat, int hessianValue )
{
    bool objectFound = false;
    float nndrRatio = 0.7f;
    //vector of keypoints
    vector< cv::KeyPoint > keypointsO;
    vector< cv::KeyPoint > keypointsS;

    Mat descriptors_object, descriptors_scene;

    //-- Step 1: Extract keypoints
    SurfFeatureDetector surf(hessianValue);
    surf.detect(sceneMat,keypointsS);
    if(keypointsS.size() < 7) return false; //Not enough keypoints, object not found
    surf.detect(objectMat,keypointsO);
    if(keypointsO.size() < 7) return false; //Not enough keypoints, object not found

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    extractor.compute( sceneMat, keypointsS, descriptors_scene );
    extractor.compute( objectMat, keypointsO, descriptors_object );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch> > matches;
    matcher.knnMatch( descriptors_object, descriptors_scene, matches, 2 );
    vector< cv::DMatch > good_matches;
    good_matches.reserve(matches.size());

    for (size_t i = 0; i < matches.size(); ++i)
    {
        if (matches[i].size() < 2)
            continue;

        const cv::DMatch &m1 = matches[i][0];
        const cv::DMatch &m2 = matches[i][1];

        if(m1.distance <= nndrRatio * m2.distance)
            good_matches.push_back(m1);
    }



    if( good_matches.size() >= 7 )
    {
        std::cout << "OBJECT FOUND!" << std::endl;

        std::vector< cv::Point2f > obj;
        std::vector< cv::Point2f > scene;

        for( unsigned int i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypointsO[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypointsS[ good_matches[i].trainIdx ].pt );
        }

        Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector< Point2f > obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( objectMat.cols, 0 );
        obj_corners[2] = cvPoint( objectMat.cols, objectMat.rows ); obj_corners[3] = cvPoint( 0, objectMat.rows );
        std::vector< Point2f > scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);


        //-- Draw lines between the corners (the mapped object in the scene image)
        cv::Scalar color( 0, 255, 0 ); // 'color' was never defined in the original
        line( sceneMat, scene_corners[0], scene_corners[1], color, 2 ); //TOP line
        line( sceneMat, scene_corners[1], scene_corners[2], color, 2 );
        line( sceneMat, scene_corners[2], scene_corners[3], color, 2 );
        line( sceneMat, scene_corners[3], scene_corners[0], color, 2 );
        objectFound=true;
    } else {
        std::cout << "OBJECT NOT FOUND!" << std::endl;
    }

    std::cout << "Matches found: " << matches.size() << std::endl;
    std::cout << "Good matches found: " << good_matches.size() << std::endl;

    return objectFound;
}
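
/* Usage sketch (assumption, not from the original source): file names and
 * the Hessian threshold are made up; 400 is a common starting value for
 * SURF. Since cv::Mat headers share pixel data, the box drawn inside
 * findObjectSURF is visible in the caller's scene image. */
void matchExample()
{
    cv::Mat object = cv::imread("object.png", CV_LOAD_IMAGE_GRAYSCALE);
    cv::Mat scene  = cv::imread("scene.png",  CV_LOAD_IMAGE_GRAYSCALE);
    if (object.empty() || scene.empty())
        return;

    if (findObjectSURF(object, scene, 400))
        cv::imwrite("scene_with_box.png", scene);
}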
Esempio n. 27
0
void Figure::DrawAxis(IplImage *output)
{
	int bs = border_size;		
	int h = figure_size.height;
	int w = figure_size.width;

	// size of graph
	int gh = h - bs * 2;
	int gw = w - bs * 2;

	// draw the horizontal and vertical axis
	// let the x and y axes cross at zero if possible.
	float y_ref = y_min;
	if ((y_max > 0) && (y_min <= 0))
		y_ref = 0;

	int x_axis_pos = h - bs - cvRound((y_ref - y_min) * y_scale);

	cvLine(output, cvPoint(bs,     x_axis_pos), 
		           cvPoint(w - bs, x_axis_pos),
				   axis_color);
	cvLine(output, cvPoint(bs, h - bs), 
		           cvPoint(bs, h - bs - gh),
				   axis_color);

	// Write the scale of the y axis
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.55,0.7, 0,1,CV_AA);

	int chw = 6, chh = 10;
	char text[16];

	// y max
	if ((y_max - y_ref) > 0.05 * (y_max - y_min))
	{
		snprintf(text, sizeof(text)-1, "%.1f", y_max);
		cvPutText(output, text, cvPoint(bs / 5, bs - chh / 2), &font, text_color);
	}
	// y min
	if ((y_ref - y_min) > 0.05 * (y_max - y_min))
	{
		snprintf(text, sizeof(text)-1, "%.1f", y_min);
		cvPutText(output, text, cvPoint(bs / 5, h - bs + chh), &font, text_color);
	}

	// x axis
	snprintf(text, sizeof(text)-1, "%.1f", y_ref);
	cvPutText(output, text, cvPoint(bs / 5, x_axis_pos + chh / 2), &font, text_color);

	// Write the scale of the x axis
	snprintf(text, sizeof(text)-1, "%.0f", x_max );
	cvPutText(output, text, cvPoint(w - bs - strlen(text) * chw, x_axis_pos + chh), 
		      &font, text_color);

	// x min
	snprintf(text, sizeof(text)-1, "%.0f", x_min );
	cvPutText(output, text, cvPoint(bs, x_axis_pos + chh), 
		      &font, text_color);


}
Esempio n. 28
0
void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
{
  CV_FUNCNAME("cvRenderTracks");
  __CV_BEGIN__;

  CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));

  if ((mode&CV_TRACK_RENDER_ID)&&(!font))
  {
    if (!defaultFont)
    {
      font = defaultFont = new CvFont;
      cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
      // Other fonts:
      //   CV_FONT_HERSHEY_SIMPLEX, CV_FONT_HERSHEY_PLAIN,
      //   CV_FONT_HERSHEY_DUPLEX, CV_FONT_HERSHEY_COMPLEX,
      //   CV_FONT_HERSHEY_TRIPLEX, CV_FONT_HERSHEY_COMPLEX_SMALL,
      //   CV_FONT_HERSHEY_SCRIPT_SIMPLEX, CV_FONT_HERSHEY_SCRIPT_COMPLEX
    }
    else
      font = defaultFont;
  }

  if (mode)
  {
    for (CvTracks::const_iterator it=tracks.begin(); it!=tracks.end(); ++it)
    {
      if (mode & CV_TRACK_RENDER_ID)
        if (!it->second->inactive)
        {
          stringstream buffer;
          buffer << it->first;
          cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(255., 255., 0.));
        }

      if (mode & CV_TRACK_RENDER_BOUNDING_BOX)
      {
        if (it->second->inactive)
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 50.));
        else
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 255., 0.));
      }

      if (mode & CV_TRACK_RENDER_TO_LOG)
      {
        clog << "Track " << it->second->id << endl;
        if (it->second->inactive)
          clog << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          clog << " - Associated with blob " << it->second->label << endl;
        clog << " - Lifetime " << it->second->lifetime << endl;
        clog << " - Active " << it->second->active << endl;
        clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        clog << endl;
      }

      if (mode & CV_TRACK_RENDER_TO_STD)
      {
        cout << "Track " << it->second->id << endl;
        if (it->second->inactive)
          cout << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          cout << " - Associated with blob " << it->second->label << endl;
        cout << " - Lifetime " << it->second->lifetime << endl;
        cout << " - Active " << it->second->active << endl;
        cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        cout << endl;
      }
    }
  }

  __CV_END__;
}
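cvRenderTracks is the last stage of the usual cvBlob pipeline: label a binary image, filter the blobs, update the track list, then render. A minimal sketch of one frame of that pipeline follows; the area thresholds and the distance/inactivity parameters are illustrative values, and the calls are assumed per cvBlob's API (cvLabel is the function shown in the next example).

#include <cvblob.h>
using namespace cvb;

void processFrame(IplImage *binary, IplImage *frame, CvTracks &tracks)
{
    // The label image must be single-channel with depth IPL_DEPTH_LABEL.
    IplImage *labelImg = cvCreateImage(cvGetSize(binary), IPL_DEPTH_LABEL, 1);

    CvBlobs blobs;
    cvLabel(binary, labelImg, blobs);     // connected-component labelling
    cvFilterByArea(blobs, 100, 100000);   // drop very small/large blobs

    // Associate blobs with tracks: max centroid distance 50 px,
    // retire tracks inactive for more than 10 frames.
    cvUpdateTracks(blobs, tracks, 50., 10);

    cvRenderTracks(tracks, frame, frame,
                   CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX, NULL);

    cvReleaseBlobs(blobs);
    cvReleaseImage(&labelImg);
}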
Example no. 29
  unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs)
  {
    CV_FUNCNAME("cvLabel");
    __CV_BEGIN__;
    {
      CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==1));
      CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_LABEL)&&(imgOut->nChannels==1));

      unsigned int numPixels=0;

      cvSetZero(imgOut);

      CvLabel label=0;
      cvReleaseBlobs(blobs);

      unsigned int stepIn = img->widthStep / (img->depth / 8);
      unsigned int stepOut = imgOut->widthStep / (imgOut->depth / 8);
      unsigned int imgIn_width = img->width;
      unsigned int imgIn_height = img->height;
      unsigned int imgIn_offset = 0;
      // unsigned int imgOut_width = imgOut->width; // Unused but set
      // unsigned int imgOut_height = imgOut->height; // Unused but set
      unsigned int imgOut_offset = 0;
      if(img->roi)
      {
	imgIn_width = img->roi->width;
	imgIn_height = img->roi->height;
	imgIn_offset = img->roi->xOffset + (img->roi->yOffset * stepIn);
      }
      if(imgOut->roi)
      {
	// imgOut_width = imgOut->roi->width;  // Unused but set
	// imgOut_height = imgOut->roi->height;  // Unused but set
	imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
      }

      unsigned char *imgDataIn = (unsigned char *)img->imageData + imgIn_offset;
      CvLabel *imgDataOut = (CvLabel *)imgOut->imageData + imgOut_offset;
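
      // The macros below index the (possibly ROI-offset) raw buffers;
      // stepIn and stepOut are row strides in elements, not bytes.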

#define imageIn(X, Y) imgDataIn[(X) + (Y)*stepIn]
#define imageOut(X, Y) imgDataOut[(X) + (Y)*stepOut]

      CvLabel lastLabel = 0;
      CvBlob *lastBlob = NULL;

      for (unsigned int y=0; y<imgIn_height; y++)
      {
	for (unsigned int x=0; x<imgIn_width; x++)
	{
	  if (imageIn(x, y))
	  {
	    bool labeled = imageOut(x, y);

	    if ((!imageOut(x, y))&&((y==0)||(!imageIn(x, y-1))))
	    {
	      labeled = true;

	      // Label contour.
	      label++;
	      CV_ASSERT(label!=CV_BLOB_MAX_LABEL);

	      imageOut(x, y) = label;
	      numPixels++;

	      // XXX This is not necessary at all. I only do this for consistency.
	      if (y>0)
		imageOut(x, y-1) = CV_BLOB_MAX_LABEL;

	      CvBlob *blob = new CvBlob;
	      blob->label = label;
	      blob->area = 1;
	      blob->minx = x; blob->maxx = x;
	      blob->miny = y; blob->maxy = y;
	      blob->m10=x; blob->m01=y;
	      blob->m11=x*y;
	      blob->m20=x*x; blob->m02=y*y;
	      blob->internalContours.clear();
	      blobs.insert(CvLabelBlob(label,blob));

              lastLabel = label;
	      lastBlob = blob;

	      blob->contour.startingPoint = cvPoint(x, y);

	      unsigned char direction=1;
	      unsigned int xx = x;
	      unsigned int yy = y;
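
	      // Trace the external contour from the starting pixel: probe up
	      // to three neighbours per step according to the current direction,
	      // record the chain code, and mark visited background neighbours
	      // with CV_BLOB_MAX_LABEL so they are not treated as unlabelled later
	      // (a contour-tracing labelling scheme in the style of Chang et al.).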


	      bool contourEnd = false;

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    unsigned int nx = xx + movesE[direction][i][0];
		    unsigned int ny = yy + movesE[direction][i][1];
		    if ((nx < imgIn_width) && (ny < imgIn_height)) // nx, ny are unsigned: a step off the left/top edge wraps around and fails these bounds checks
		    {
		      if (imageIn(nx, ny))
		      {
			found = true;

			blob->contour.chainCode.push_back(movesE[direction][i][3]);

			xx = nx;
			yy = ny;

			direction = movesE[direction][i][2];
			break;
		      }
		      else
		      {
			imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		      }
		    }
		  }

		  if (!found)
		    direction = (direction + 1) % 4;
		  else
		  {
		    if (imageOut(xx, yy) != label)
		    {
		      imageOut(xx, yy) = label;
		      numPixels++;

		      if (xx < blob->minx) blob->minx = xx;
		      else if (xx > blob->maxx) blob->maxx = xx;
		      if (yy < blob->miny) blob->miny = yy;
		      else if (yy > blob->maxy) blob->maxy = yy;

		      blob->area++;
		      blob->m10 += xx; blob->m01 += yy;
		      blob->m11 += xx*yy;
		      blob->m20 += xx*xx; blob->m02 += yy * yy;
		    }

		    break;
		  }
		  
		  if ( (contourEnd = ((xx == x) && (yy == y) && (direction == 1))) )
		    break;
		}
	      }
	      while (!contourEnd);

	    }

	    if ((y + 1 < imgIn_height) && (!imageIn(x, y + 1)) && (!imageOut(x, y + 1)))
	    {
	      labeled = true;

	      // Label internal contour
	      CvLabel l;
	      CvBlob *blob = NULL;

	      if (!imageOut(x, y))
	      {
		l = imageOut(x - 1, y);

		imageOut(x, y) = l;
		numPixels++;

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
		blob->area++;
		blob->m10 += x; blob->m01 += y;
		blob->m11 += x*y;
		blob->m20 += x*x; blob->m02 += y*y;
	      }
	      else
	      {
		l = imageOut(x, y);

                if (l==lastLabel)
                  blob = lastBlob;
                else
                {
                  blob = blobs.find(l)->second;
                  lastLabel = l;
                  lastBlob = blob;
                }
	      }

	      // XXX This is not necessary (I believe). I only do this for consistency.
	      imageOut(x, y + 1) = CV_BLOB_MAX_LABEL;

	      CvContourChainCode *contour = new CvContourChainCode;
	      contour->startingPoint = cvPoint(x, y);

	      unsigned char direction = 3;
	      unsigned int xx = x;
	      unsigned int yy = y;
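
	      // Trace the internal contour (hole boundary) the same way,
	      // assigning its pixels to the enclosing blob's label l.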

	      do
	      {
		for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
		{
		  bool found = false;

		  for (unsigned char i=0; i<3; i++)
		  {
		    unsigned int nx = xx + movesI[direction][i][0];
		    unsigned int ny = yy + movesI[direction][i][1];
		    if (imageIn(nx, ny))
		    {
		      found = true;

		      contour->chainCode.push_back(movesI[direction][i][3]);

		      xx=nx;
		      yy=ny;

		      direction=movesI[direction][i][2];
		      break;
		    }
		    else
		    {
		      imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
		    }
		  }

		  if (!found)
		    direction=(direction+1)%4;
		  else
		  {
		    if (!imageOut(xx, yy))
		    {
		      imageOut(xx, yy) = l;
		      numPixels++;

		      blob->area++;
		      blob->m10 += xx; blob->m01 += yy;
		      blob->m11 += xx*yy;
		      blob->m20 += xx*xx; blob->m02 += yy*yy;
		    }

		    break;
		  }
		}
	      }
	      while (!(xx==x && yy==y));

	      blob->internalContours.push_back(contour);
	    }

	    //else if (!imageOut(x, y))
	    if (!labeled)
	    {
	      // Internal pixel
	      CvLabel l = imageOut(x-1, y);

	      imageOut(x, y) = l;
	      numPixels++;

	      CvBlob *blob = NULL;
              if (l==lastLabel)
                blob = lastBlob;
              else
              {
                blob = blobs.find(l)->second;
                lastLabel = l;
                lastBlob = blob;
              }
	      blob->area++;
	      blob->m10 += x; blob->m01 += y;
	      blob->m11 += x*y;
	      blob->m20 += x*x; blob->m02 += y*y;
	    }
	  }
	}
      }

      for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
      {
	cvCentroid((*it).second);

        (*it).second->u11 = (*it).second->m11 - ((*it).second->m10*(*it).second->m01)/(*it).second->m00;
        (*it).second->u20 = (*it).second->m20 - ((*it).second->m10*(*it).second->m10)/(*it).second->m00;
        (*it).second->u02 = (*it).second->m02 - ((*it).second->m01*(*it).second->m01)/(*it).second->m00;

        double m00_2 = (*it).second->m00 * (*it).second->m00;

        (*it).second->n11 = (*it).second->u11 / m00_2;
        (*it).second->n20 = (*it).second->u20 / m00_2;
        (*it).second->n02 = (*it).second->u02 / m00_2;

        (*it).second->p1 = (*it).second->n20 + (*it).second->n02;

        double nn = (*it).second->n20 - (*it).second->n02;
        (*it).second->p2 = nn*nn + 4.*((*it).second->n11*(*it).second->n11);
      }

      return numPixels;

    }
    __CV_END__;
    
    return 0; // not normally reached; silences the compiler's missing-return warning
  }
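For reference, the closing loop computes, per blob, the central moments from the raw moments, then the normalized moments and the first two Hu invariants; in the usual notation:

u_{11} = m_{11} - \frac{m_{10} m_{01}}{m_{00}}, \qquad
u_{20} = m_{20} - \frac{m_{10}^2}{m_{00}}, \qquad
u_{02} = m_{02} - \frac{m_{01}^2}{m_{00}}

\eta_{pq} = \frac{u_{pq}}{m_{00}^{2}} \quad \text{(for } p+q=2\text{, since } m_{00}^{1+(p+q)/2} = m_{00}^{2}\text{)}

p_1 = \eta_{20} + \eta_{02}, \qquad
p_2 = (\eta_{20} - \eta_{02})^2 + 4\,\eta_{11}^2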
Example no. 30
int main(int argc, char** argv) {

	///////////////////////////////////////////////////////////////////
	// init phase

	std::string inputFilename;
	bool dataset = false;
	for (int i = 1; i < argc; ++i)
	{
		if (std::string(argv[i]) == "-i" && i + 1 < argc) {
			inputFilename = argv[++i];
		}
		else if (std::string(argv[i]) == "-d" && i + 1 < argc) {
			dataset = true;
			inputFilename = argv[++i];
		}
		else {
			std::cerr << "Usage: " << argv[0] << " -i <input image> | -d <image list file>" << std::endl;
			return EXIT_FAILURE;
		}
	}

	std::vector<cv::Mat> images;
	if (dataset) {
		std::ifstream in;
		in.open(inputFilename);

		std::string filename;
		while (std::getline(in, filename)) {
			if (filename.empty())
				continue;

			cv::Mat image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);

			if (!image.data)
			{
				std::cerr << "Could not open or find the image: " << filename << std::endl;
				continue; // skip unreadable entries instead of storing an empty Mat
			}

			images.push_back(image);
		}
	}
	else {
		cv::Mat image = cv::imread(inputFilename.c_str(), CV_LOAD_IMAGE_COLOR);

		if (!image.data)
		{
			std::cerr << "Could not open or find the image: " << inputFilename << std::endl;
			return EXIT_FAILURE;
		}

		images.push_back(image);
	}

	SimpleWaldboost w;
	w.init();

	for (std::vector<cv::Mat>::const_iterator it = images.begin(); it != images.end(); ++it)
	{
		cv::Mat image = *it;

		w.setImage(image);
		w.createPyramids(cvSize(320, 240), cvSize(48, 48), 8, 4);

		// Preloaded classifier; we don't have to load it from XML
		// (and we don't support that anyway).
		w.setClassifier(&detector);

		///////////////////////////////////////////////////////////////////
		// detection phase	

		std::vector<Detection> detections;
		int n = w.detect(&detections);

		// draw a black outline plus a white inner rectangle around each detection
		for (std::vector<Detection>::const_iterator d = detections.begin(); d != detections.end(); ++d)
		{
			cv::rectangle(image, cvPoint(d->x, d->y), cvPoint(d->x + d->width, d->y + d->height), CV_RGB(0, 0, 0), 3);
			cv::rectangle(image, cvPoint(d->x, d->y), cvPoint(d->x + d->width, d->y + d->height), CV_RGB(255, 255, 255));
		}

		cv::imshow("Result", image);
		cv::waitKey();
	}
		
	return EXIT_SUCCESS;
}
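Loading an image list file is the kind of step worth factoring into a helper. Below is a small sketch that skips blank lines and unreadable files; the name loadImageList is ours, not from the example above.

#include <opencv2/opencv.hpp>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Read a text file with one image path per line and load each image.
// Blank lines and unreadable files are skipped with a warning.
std::vector<cv::Mat> loadImageList(const std::string &listFile)
{
    std::vector<cv::Mat> images;
    std::ifstream in(listFile.c_str());
    std::string line;
    while (std::getline(in, line)) {
        if (line.empty())
            continue;
        cv::Mat img = cv::imread(line, CV_LOAD_IMAGE_COLOR);
        if (!img.data) {
            std::cerr << "Skipping unreadable image: " << line << std::endl;
            continue;
        }
        images.push_back(img);
    }
    return images;
}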