示例#1
0
文件: testseq.cpp 项目: DevShah/18551
/* Read one test-sequence element description from an OpenCV file-storage node.
 *
 * pTS  - test-sequence context (supplies/advances the running object counter).
 * fs   - open CvFileStorage being parsed.
 * node - map node describing this element.
 *
 * Returns a newly allocated CvTestSeqElem (possibly the head of a list when
 * the entry is a reference expanded via icvTestSeqReadElemAll), or NULL.
 *
 * A "Video"/"VideoObj" string that is not one of the noise_* keywords is
 * treated as a reference to another named section and expanded recursively;
 * otherwise a brand new element is created and filled in here.
 */
static CvTestSeqElem* icvTestSeqReadElemOne(CvTestSeq_* pTS, CvFileStorage* fs, CvFileNode* node)
{
    int             noise_type = CV_NOISE_NONE;;
    CvTestSeqElem*  pElem = NULL;
    const char*     pVideoName = cvReadStringByName( fs, node,"Video", NULL);
    const char*     pVideoObjName = cvReadStringByName( fs, node,"VideoObj", NULL);

    if(pVideoName)
    {   /* Check to noise flag: */
        /* "Video" may name a synthetic noise source instead of a section. */
        if( cv_stricmp(pVideoName,"noise_gaussian") == 0 ||
            cv_stricmp(pVideoName,"noise_normal") == 0) noise_type = CV_NOISE_GAUSSIAN;
        if( cv_stricmp(pVideoName,"noise_uniform") == 0) noise_type = CV_NOISE_UNIFORM;
        if( cv_stricmp(pVideoName,"noise_speckle") == 0) noise_type = CV_NOISE_SPECKLE;
        if( cv_stricmp(pVideoName,"noise_salt_and_pepper") == 0) noise_type = CV_NOISE_SALT_AND_PEPPER;
    }

    if((pVideoName || pVideoObjName ) && noise_type == CV_NOISE_NONE)
    {   /* Read other elements: */
        /* The entry is a reference: expand the named section recursively.
         * For "VideoObj" every expanded element is stamped with a fresh
         * object ID and the object counter is advanced. */
        if(pVideoName) pElem = icvTestSeqReadElemAll(pTS, fs, pVideoName);
        if(pVideoObjName)
        {
            CvTestSeqElem* pE;
            pElem = icvTestSeqReadElemAll(pTS, fs, pVideoObjName);
            for(pE=pElem;pE;pE=pE->next)
            {
                pE->ObjID = pTS->ObjNum;
                pE->pObjName = pVideoObjName;
            }
            pTS->ObjNum++;
        }
    }   /* Read other elements. */
    else
    {   /* Create new element: */
        /* "Pos"/"Size" may hold the literal string "auto", requesting that
         * position/size be derived from the foreground mask below. */
        CvFileNode* pPosNode = cvGetFileNodeByName( fs, node,"Pos");
        CvFileNode* pSizeNode = cvGetFileNodeByName( fs, node,"Size");
        int AutoSize = (pSizeNode && CV_NODE_IS_STRING(pSizeNode->tag) && cv_stricmp("auto",cvReadString(pSizeNode,""))==0);
        int AutoPos = (pPosNode && CV_NODE_IS_STRING(pPosNode->tag) && cv_stricmp("auto",cvReadString(pPosNode,""))==0);
        const char* pFileName = cvReadStringByName( fs, node,"File", NULL);
        /* NOTE(review): cvAlloc is assumed to succeed (legacy OpenCV raises
         * an error on allocation failure rather than returning NULL). */
        pElem = (CvTestSeqElem*)cvAlloc(sizeof(CvTestSeqElem));
        memset(pElem,0,sizeof(CvTestSeqElem));

        pElem->ObjID = -1;
        pElem->noise_type = noise_type;
        cvRandInit( &pElem->rnd_state, 1, 0, 0,CV_RAND_NORMAL);

        if(pFileName && pElem->noise_type == CV_NOISE_NONE)
        {   /* If AVI or BMP: */
            /* NOTE(review): the ".avi" test below reads pFileName+l-4 and so
             * assumes the name is at least 4 characters long -- confirm the
             * config inputs, a shorter name would index before the buffer. */
            size_t  l = strlen(pFileName);
            pElem->pFileName = pFileName;

            pElem->type = SRC_TYPE_IMAGE;
            if(cv_stricmp(".avi",pFileName+l-4) == 0)pElem->type = SRC_TYPE_AVI;

            if(pElem->type == SRC_TYPE_IMAGE)
            {
                /* Image loading is disabled (call commented out), so pImg is
                 * still NULL here and this branch is effectively dead. */
                //pElem->pImg = cvLoadImage(pFileName);
                if(pElem->pImg)
                {
                    pElem->FrameNum = 1;
                    if(pElem->pImgMask)cvReleaseImage(&(pElem->pImgMask));

                    pElem->pImgMask = cvCreateImage(
                        cvSize(pElem->pImg->width,pElem->pImg->height),
                        IPL_DEPTH_8U,1);
                    icvTestSeqCreateMask(pElem->pImg,pElem->pImgMask,FG_BG_THRESHOLD);
                }
            }

            if(pElem->type == SRC_TYPE_AVI && pFileName)
            {
                /* AVI capture is likewise disabled; pAVI stays NULL and only
                 * the warning branch can execute. */
                //pElem->pAVI = cvCaptureFromFile(pFileName);

                if(pElem->pAVI)
                {
                    IplImage* pImg = 0;//cvQueryFrame(pElem->pAVI);
                    pElem->pImg = cvCloneImage(pImg);
                    pElem->pImg->origin = 0;
                    //cvSetCaptureProperty(pElem->pAVI,CV_CAP_PROP_POS_FRAMES,0);
                    pElem->FrameBegin = 0;
                    pElem->AVILen = pElem->FrameNum = 0;//(int)cvGetCaptureProperty(pElem->pAVI, CV_CAP_PROP_FRAME_COUNT);
                    //cvReleaseCapture(&pElem->pAVI);
                    pElem->pAVI = NULL;
                }
                else
                {
                    printf("WARNING!!! Cannot open avi file %s\n",pFileName);
                }
            }

        }   /* If AVI or BMP. */

        if(pPosNode)
        {   /* Read positions: */
            /* Raw data is a flat float sequence: 2 floats per point, so the
             * number of positions is num/2. */
            if(CV_NODE_IS_SEQ(pPosNode->tag))
            {
                int num = pPosNode->data.seq->total;
                pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(float)*num);
                cvReadRawData( fs, pPosNode, pElem->pPos, "f" );
                pElem->PosNum = num/2;
                if(pElem->FrameNum == 0) pElem->FrameNum = pElem->PosNum;
            }
        }

        if(pSizeNode)
        {   /* Read sizes: */
            if(CV_NODE_IS_SEQ(pSizeNode->tag))
            {
                int num = pSizeNode->data.seq->total;
                pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(float)*num);
                cvReadRawData( fs, pSizeNode, pElem->pSize, "f" );
                pElem->SizeNum = num/2;
            }
        }

        if(AutoPos || AutoSize)
        {   /* Auto size and pos: */
            /* Derive per-frame position/size from the foreground mask using
             * image moments; both are normalized to [0,1] of the frame. */
            int     i;
            int     num = (pElem->type == SRC_TYPE_AVI)?pElem->AVILen:1;
            if(AutoSize)
            {
                pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num);
                pElem->SizeNum = num;
            }
            if(AutoPos)
            {
                pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num);
                pElem->PosNum = num;
            }

            for(i=0; i<num; ++i)
            {
                IplImage* pFG = NULL;
                CvPoint2D32f* pPos = AutoPos?(pElem->pPos + i):NULL;
                CvPoint2D32f* pSize = AutoSize?(pElem->pSize + i):NULL;

                icvTestSeqQureyFrameElem(pElem,i);
                pFG = pElem->pImgMask;

                /* Defaults when no mask is available: centered, zero size. */
                if(pPos)
                {
                    pPos->x = 0.5f;
                    pPos->y = 0.5f;
                }
                if(pSize)
                {
                    pSize->x = 0;
                    pSize->y = 0;
                }

                if(pFG)
                {
                    double      M00;
                    CvMoments   m;
                    cvMoments( pElem->pImgMask, &m, 0 );
                    M00 = cvGetSpatialMoment( &m, 0, 0 );

                    if(M00 > 0 && pSize )
                    {
                        /* Size = 4 standard deviations of the mask mass in
                         * each axis, normalized by the frame dimension. */
                        double X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                        double Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
                        double XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                        double YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
                        pSize->x = (float)(4*sqrt(XX))/(pElem->pImgMask->width-1);
                        pSize->y = (float)(4*sqrt(YY))/(pElem->pImgMask->height-1);
                    }

                    if(M00 > 0 && pPos)
                    {
                        /* Position = normalized centroid of the mask. */
                        pPos->x = (float)(cvGetSpatialMoment( &m, 1, 0 )/(M00*(pElem->pImgMask->width-1)));
                        pPos->y = (float)(cvGetSpatialMoment( &m, 0, 1 )/(M00*(pElem->pImgMask->height-1)));
                    }

                    if(pPos)
                    {   /* Another way to calculate y pos
                         * using object median:
                         */
                        /* Scan rows from the top and the bottom until a row
                         * with enough foreground (row sum > 255*7, i.e.
                         * presumably at least 7 mask pixels -- assumes a
                         * 0/255 mask, TODO confirm) is found; the y position
                         * becomes the midpoint of that band. This overwrites
                         * the centroid-based y computed above. */
                        int y0=0, y1=pFG->height-1;
                        for(y0=0; y0<pFG->height; ++y0)
                        {
                            CvMat       tmp;
                            CvScalar    s = cvSum(cvGetRow(pFG, &tmp, y0));
                            if(s.val[0] > 255*7) break;
                        }

                        for(y1=pFG->height-1; y1>0; --y1)
                        {
                            CvMat tmp;
                            CvScalar s = cvSum(cvGetRow(pFG, &tmp, y1));
                            if(s.val[0] > 255*7) break;
                        }

                        pPos->y = (y0+y1)*0.5f/(pFG->height-1);
                    }
                }   /* pFG */
            }   /* Next frame. */

            //if(pElem->pAVI) cvReleaseCapture(&pElem->pAVI);

            pElem->pAVI = NULL;

        }   /* End auto position creation. */
    }   /*  Create new element. */

    if(pElem)
    {   /* Read transforms and: */
        int             FirstFrame, LastFrame;
        CvTestSeqElem*  p=pElem;
        CvFileNode*     pTransNode = NULL;
        CvFileNode*     pS = NULL;
        int             ShiftByPos = 0;
        /* NOTE(review): fixed-capacity key-frame table; a "Trans" sequence
         * longer than 1024 entries would overflow it -- confirm inputs. */
        int             KeyFrames[1024];
        CvSeq*          pTransSeq = NULL;
        int             KeyFrameNum = 0;

        pTransNode = cvGetFileNodeByName( fs, node,"Trans");

        /* Follow chained string references ("Trans: <name>") until a real
         * node or the literal "auto" is reached. */
        while( pTransNode &&
               CV_NODE_IS_STRING(pTransNode->tag) &&
               cv_stricmp("auto",cvReadString(pTransNode,""))!=0)
        {   /* Trans is reference: */
            pTransNode = cvGetFileNodeByName( fs, NULL,cvReadString(pTransNode,""));
        }

        pS = cvGetFileNodeByName( fs, node,"Shift");
        ShiftByPos = 0;
        pTransSeq = pTransNode?(CV_NODE_IS_SEQ(pTransNode->tag)?pTransNode->data.seq:NULL):NULL;
        KeyFrameNum = pTransSeq?pTransSeq->total:1;

        /* "Shift: auto" or "Trans: auto" requests shifting by position. */
        if(   (pS && CV_NODE_IS_STRING(pS->tag) && cv_stricmp("auto",cvReadString(pS,""))==0)
            ||(pTransNode && CV_NODE_IS_STRING(pTransNode->tag) && cv_stricmp("auto",cvReadString(pTransNode,""))==0))
        {
            ShiftByPos = 1;
        }

        FirstFrame = pElem->FrameBegin;
        LastFrame = pElem->FrameBegin+pElem->FrameNum-1;

        /* Calculate length of video and reallocate
         * transformation array:
         */
        for(p=pElem; p; p=p->next)
        {
            int v;
            v = cvReadIntByName( fs, node, "BG", -1 );
            if(v!=-1)p->BG = v;
            v = cvReadIntByName( fs, node, "Mask", -1 );
            if(v!=-1)p->Mask = v;

            /* "FrameNum"/"Dur" are synonyms; "LastFrame" can only shrink
             * the range, never extend it (MIN below). */
            p->FrameBegin += cvReadIntByName( fs, node, "FrameBegin", 0 );
            p->FrameNum = cvReadIntByName( fs, node, "FrameNum", p->FrameNum );
            p->FrameNum = cvReadIntByName( fs, node, "Dur", p->FrameNum );
            {
                int lastFrame = cvReadIntByName( fs, node, "LastFrame", p->FrameBegin+p->FrameNum-1 );
                p->FrameNum = MIN(p->FrameNum,lastFrame - p->FrameBegin+1);
            }

            icvTestSeqAllocTrans(p);

            {   /* New range estimation: */
                int LF = p->FrameBegin+p->FrameNum-1;
                if(p==pElem || FirstFrame > p->FrameBegin)FirstFrame = p->FrameBegin;
                if(p==pElem || LastFrame < LF)LastFrame = LF;
            }   /* New range estimation. */
        }   /*  End allocate new transfrom array. */

        if(ShiftByPos)
        {
            for(p=pElem;p;p=p->next)
            {   /* Modify transformation to make autoshift: */
                /* Bake the per-frame position (cycled via i % PosNum) into
                 * the shift and affine translation terms so the object is
                 * placed at its recorded position. */
                int         i;
                int         num = p->FrameNum;
                assert(num <= p->TransNum);
                p->TransNum = MAX(1,num);

                for(i=0; i<num; ++i)
                {
                    CvTSTrans*  pT = p->pTrans+i;
                    //float   t = (num>1)?((float)i/(num-1)):0.0f;
                    float newx = p->pPos[i%p->PosNum].x;
                    float newy = p->pPos[i%p->PosNum].y;
                    pT->Shift.x = -newx*pT->Scale.x;
                    pT->Shift.y = -newy*pT->Scale.y;

                    if(p->pImg)
                    {
                        newx *= p->pImg->width-1;
                        newy *= p->pImg->height-1;
                    }

                    pT->T[2] = -(pT->T[0]*newx+pT->T[1]*newy);
                    pT->T[5] = -(pT->T[3]*newx+pT->T[4]*newy);
                }
            }   /* Modify transformation old. */
        }   /*  Next record. */

        /* Initialize frame number array: */
        KeyFrames[0] = FirstFrame;

        if(pTransSeq&&KeyFrameNum>1)
        {
            /* Read each key frame's "frame" field (-1 = unset), then fill
             * the unset entries by linear interpolation between the nearest
             * set neighbours. */
            int i0,i1;
            for(int i=0; i<KeyFrameNum; ++i)
            {
                CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i);
                KeyFrames[i] = cvReadIntByName(fs,pTN,"frame",-1);
            }

            if(KeyFrames[0]<0)KeyFrames[0]=FirstFrame;
            if(KeyFrames[KeyFrameNum-1]<0)KeyFrames[KeyFrameNum-1]=LastFrame;

            for(i0=0, i1=1; i1<KeyFrameNum;)
            {
                for(i1=i0+1; i1<KeyFrameNum && KeyFrames[i1]<0; i1++) {}

                assert(i1<KeyFrameNum);
                assert(i1>i0);

                for(int i=i0+1; i<i1; ++i)
                {
                    KeyFrames[i] = cvRound(KeyFrames[i0] + (float)(i-i0)*(float)(KeyFrames[i1] - KeyFrames[i0])/(float)(i1-i0));
                }
                i0 = i1;
                i1++;
            }   /* Next key run. */
        }   /*  Initialize frame number array. */

        if(pTransNode || pTransSeq)
        {   /* More complex transform. */
            /* For every parameter in the file-scope param_name table, read
             * either a single value (one key frame) or a piecewise-linear
             * ramp between key frames, and apply it to each per-frame
             * transform via icvUpdateTrans. */
            int     param;
            CvFileNode* pTN = pTransSeq?(CvFileNode*)cvGetSeqElem(pTransSeq,0):pTransNode;

            for(p=pElem; p; p=p->next)
            {
                //int trans_num = p->TransNum;
                for(param=0; param_name[param]; ++param)
                {
                    const char*   name = param_name[param];
                    float   defv = param_defval[param];
                    if(KeyFrameNum==1)
                    {   /* Only one transform record: */
                        int     i;
                        double  val;
                        CvFileNode* fnode = cvGetFileNodeByName( fs, pTN,name);
                        if(fnode == NULL) continue;
                        val = cvReadReal(fnode,defv);

                        for(i=0; i<p->TransNum; ++i)
                        {
                            icvUpdateTrans(
                                p->pTrans+i, param, val,
                                p->pImg?(float)(p->pImg->width-1):1.0f,
                                p->pImg?(float)(p->pImg->height-1):1.0f);
                        }
                    }   /* Next record. */
                    else
                    {   /* Several transforms: */
                        /* Interpolate the parameter linearly from v0 at key
                         * frame i0 to v1 at key frame i1; key frames without
                         * this parameter are skipped (run is extended). */
                        int         i0,i1;
                        double      v0;
                        double      v1;

                        CvFileNode* pTN1 = (CvFileNode*)cvGetSeqElem(pTransSeq,0);
                        v0 = cvReadRealByName(fs, pTN1,name,defv);

                        for(i1=1,i0=0; i1<KeyFrameNum; ++i1)
                        {
                            int         f0,f1;
                            int         i;
                            CvFileNode* pTN2 = (CvFileNode*)cvGetSeqElem(pTransSeq,i1);
                            CvFileNode* pVN = cvGetFileNodeByName(fs,pTN2,name);

                            if(pVN)v1 = cvReadReal(pVN,defv);
                            else if(pVN == NULL && i1 == KeyFrameNum-1) v1 = defv;
                            else continue;

                            f0 = KeyFrames[i0];
                            f1 = KeyFrames[i1];

                            /* Include the last key frame itself in the run. */
                            if(i1==(KeyFrameNum-1)) f1++;

                            for(i=f0; i<f1; ++i)
                            {
                                double   val;
                                double   t = (float)(i-f0);
                                int      li = i - p->FrameBegin;
                                if(li<0) continue;
                                if(li>= p->TransNum) break;
                                if(KeyFrames[i1]>KeyFrames[i0]) t /=(float)(KeyFrames[i1]-KeyFrames[i0]);
                                val = t*(v1-v0)+v0;

                                icvUpdateTrans(
                                    p->pTrans+li, param, val,
                                    p->pImg?(float)(p->pImg->width-1):1.0f,
                                    p->pImg?(float)(p->pImg->height-1):1.0f);

                            }   /* Next transform. */
                            i0 = i1;
                            v0 = v1;

                        }   /* Next value run. */
                    }   /*  Several transforms. */
                }   /*  Next parameter. */
            }   /*  Next record. */
        }   /*  More complex transform. */
    }   /*  Read transfroms. */

    return pElem;

}   /* icvTestSeqReadElemOne */
示例#2
0
int toCircleRad(const double dRad)
{
    return clip<int>(cvRound(dRad), 1, 255);
}
/// Detect candidate circular sample areas in a BGR frame.
/// Returns one Vec3f per accepted circle: (center_x, center_y, radius),
/// with the radius shrunk by CIRCLE_SHRINK_FACTOR. Circles that are not
/// entirely inside the frame are rejected.
std::vector<cv::Vec3f> CircularSampleAreaDetector::detect(cv::Mat frame) {
  // Convert the image to grayscale.
  // FIX: the original constructed frame_gray as an alias of `frame`
  // (shared pixel buffer). cvtColor reallocates its output anyway, so the
  // aliasing bought nothing and was misleading; start from an empty Mat.
  cv::Mat frame_gray;
  cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);

  // Blur to remove extraneous detail before circle detection.
  cv::GaussianBlur(frame_gray, frame_gray, cv::Size(9, 9), 2, 2);

  // Extract circle features with the Hough transform.
  std::vector<cv::Vec3f> circles;
  HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
      2,    // inverse resolution ratio
      50,   // min dist between circle centers
      50,   // canny upper threshold
      150,  // center detection threshold
      0,    // min radius
      0     // max radius
    );

  // Of the circles found, keep those fully contained in the frame.
  // TODO: This is not the best way to do this. Research probabilistic methods?
  std::vector<cv::Vec3f> good_circles;
  for (size_t i = 0; i < circles.size(); i++) {
    const cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    // FIX: round the radius instead of truncating it, so the containment
    // test below does not under-estimate the circle's true extent.
    const int radius = cvRound(circles[i][2]);

    // Ensure circle is entirely in screen.
    if (center.x - radius < 0 || center.x + radius > frame_gray.cols
        || center.y - radius < 0 || center.y + radius > frame_gray.rows) {
      continue;
    }

    good_circles.push_back(cv::Vec3f(circles[i][0], circles[i][1],
                                     circles[i][2] * CIRCLE_SHRINK_FACTOR));
  }

  return good_circles;
}
/* Stereo-camera calibration driver.
 *
 * Pipeline: load left/right image pairs -> find chessboard corners ->
 * stereoCalibrate -> report epipolar reprojection error -> stereoRectify ->
 * write intrinsics/extrinsics to file storage -> show rectified pairs
 * side-by-side with valid ROIs until 'q' is pressed.
 *
 * NOTE(review): cv::vector is the legacy OpenCV 2.x alias of std::vector;
 * newer OpenCV removes it -- confirm the targeted OpenCV version.
 */
int main(int argc, char *argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);

    cv::vector<cv::Mat> lefts, rights;
    // Inner-corner count of the calibration chessboard (columns x rows).
    const cv::Size patternSize(9, 6);
    cv::vector<cv::vector<cv::Point3f>> worldPoints;
    // imagePoints[0] = left camera corners, imagePoints[1] = right camera.
    cv::vector<cv::vector<cv::vector<cv::Point2f>>> imagePoints(2);

    for (size_t i = 0; i < 2; i++)
        imagePoints[i].resize(FLAGS_size);

    loadImages(lefts, rights, FLAGS_size);
    // findChessboards returns how many pairs actually had detectable boards.
    FLAGS_size = findChessboards(lefts, rights, imagePoints, patternSize, FLAGS_size);
    std::cout << "number of correct files = " << FLAGS_size << std::endl;
    // 0.024 is the chessboard square size (presumably meters -- confirm).
    setWorldPoints(worldPoints, patternSize, 0.024, FLAGS_size);

    std::cout << "calibrate stereo cameras" << std::endl;
    cv::vector<cv::Mat> cameraMatrix(2);
    cv::vector<cv::Mat> distCoeffs(2);
    cameraMatrix[0] = cv::Mat::eye(3, 3, CV_64FC1);
    cameraMatrix[1] = cv::Mat::eye(3, 3, CV_64FC1);
    distCoeffs[0] = cv::Mat(8, 1, CV_64FC1);
    distCoeffs[1] = cv::Mat(8, 1, CV_64FC1);
    cv::Mat R, T, E, F;

    double rms = stereoCalibrate(
            worldPoints, imagePoints[0], imagePoints[1], cameraMatrix[0],
            distCoeffs[0], cameraMatrix[1], distCoeffs[1], lefts[0].size(),
            R, T, E, F, cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
            CV_CALIB_FIX_ASPECT_RATIO +
            CV_CALIB_ZERO_TANGENT_DIST +
            CV_CALIB_SAME_FOCAL_LENGTH +
            CV_CALIB_RATIONAL_MODEL +
            CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    std::cout << "done with RMS error = " << rms << std::endl;

    /* Calibration-quality check: for each corner, measure its distance to
     * the epipolar line induced by the matching corner in the other view.
     * NOTE(review): if FLAGS_size is 0 this divides by npoints == 0. */
    double err = 0;
    int npoints = 0;
    for (int i = 0; i < FLAGS_size; i++) {
        int size = (int) imagePoints[0][i].size();
        cv::vector<cv::Vec3f> lines[2];
        cv::Mat imgpt[2];
        for (int k = 0; k < 2; k++) {
            imgpt[k] = cv::Mat(imagePoints[k][i]);
            cv::undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k],
                                cv::Mat(), cameraMatrix[k]);
            cv::computeCorrespondEpilines(imgpt[k], k + 1, F, lines[k]);
        }

        for (int j = 0; j < size; j++) {
            // Point-to-epipolar-line distance, summed over both directions.
            double errij =
                    std::fabs(imagePoints[0][i][j].x * lines[1][j][0] +
                              imagePoints[0][i][j].y * lines[1][j][1] +
                              lines[1][j][2]) +
                    std::fabs(imagePoints[1][i][j].x * lines[0][j][0] +
                              imagePoints[1][i][j].y * lines[0][j][1] +
                              lines[0][j][2]);
            err += errij;
        }
        npoints += size;
    }
    std::cout << "average reprojection error = " << err / npoints << std::endl;

    // Compute rectification transforms and the valid pixel ROIs.
    cv::Mat R1, R2, P1, P2, Q;
    cv::Rect validROI[2];
    stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
                  distCoeffs[1], lefts[0].size(), R, T, R1, R2, P1, P2, Q,
                  cv::CALIB_ZERO_DISPARITY, 1, lefts[0].size(),
                  &validROI[0], &validROI[1]);

    {
        // Persist intrinsics (camera matrices + distortion coefficients).
        cv::FileStorage fs(FLAGS_intrinsics.c_str(), cv::FileStorage::WRITE);
        if (fs.isOpened()) {
            fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0]
                << "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
            fs.release();
        }
    }

    // Precompute the undistort+rectify remap tables for both cameras.
    cv::Mat rmap[2][2];
    cv::initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1,
                                lefts[0].size(),
                                CV_16SC2,
                                rmap[0][0], rmap[0][1]);
    cv::initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2,
                                lefts[0].size(),
                                CV_16SC2,
                                rmap[1][0], rmap[1][1]);

    {
        // Persist extrinsics and rectification results.
        cv::FileStorage fs(FLAGS_extrinsics.c_str(), cv::FileStorage::WRITE);
        if (fs.isOpened()) {
            fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2
               << "P1" << P1 << "P2" << P2 << "Q" << Q
               << "V1" << validROI[0] << "V2" << validROI[1];
            fs.release();
        }
    }

    // Visual check: draw rectified left/right pairs scaled to ~600px max
    // dimension, side by side on one canvas, with horizontal scanlines.
    cv::Mat canvas;
    double sf;
    int w, h;

    sf = 600. / MAX(lefts[0].size().width, lefts[0].size().height);
    w = cvRound(lefts[0].size().width * sf);
    h = cvRound(lefts[0].size().height * sf);
    canvas.create(h, w * 2, CV_8UC3);

    cv::namedWindow("Rectified", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    for (int i = 0; i < FLAGS_size; i++) {
        // k == 0 handles the left image, k == 1 the right; the two branches
        // differ only in the source list (lefts vs rights).
        for (int k = 0; k < 2; k++) {
            if (k == 0) {
                cv::Mat img = lefts[i].clone(), rimg, cimg;
                cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
                cv::cvtColor(rimg, cimg, CV_GRAY2BGR);
                cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
                cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);

                // Outline the valid rectification ROI in red.
                cv::Rect vroi(cvRound(validROI[k].x * sf),
                              cvRound(validROI[k].y * sf),
                              cvRound(validROI[k].width * sf),
                              cvRound(validROI[k].height * sf));
                cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
            } else {
                cv::Mat img = rights[i].clone(), rimg, cimg;
                cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
                cvtColor(rimg, cimg, CV_GRAY2BGR);
                cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
                cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);

                cv::Rect vroi(cvRound(validROI[k].x * sf),
                              cvRound(validROI[k].y * sf),
                              cvRound(validROI[k].width * sf),
                              cvRound(validROI[k].height * sf));
                cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
            }
        }

        // Green scanlines every 16 rows: after rectification, corresponding
        // features should lie on the same horizontal line in both halves.
        for (int j = 0; j < canvas.rows; j += 16)
            cv::line(canvas, cv::Point(0, j), cv::Point(canvas.cols, j),
                     cv::Scalar(0, 255, 0), 1, 8);

        cv::imshow("Rectified", canvas);

        if (cv::waitKey(0) == 'q')
            break;
    }

    cv::destroyAllWindows();
    return 0;
}
示例#5
0
	void Run()
	{
		int w, h;
		IplImage *pCapImage;
		PBYTE pCapBuffer = NULL;
		
        // Create camera instance
		_cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps);
		
        if(_cam == NULL)		return;
		
        // Get camera frame dimensions
		CLEyeCameraGetFrameDimensions(_cam, w, h);
		
        // Depending on color mode chosen, create the appropriate OpenCV image
		if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW)
			pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4);
		else
			pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);

		// Set some camera parameters
		//CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 30);
		//CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 500);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_WHITEBALANCE, false);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_RED, 100);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_BLUE, 200);
        //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_GREEN, 200);

        

		// Start capturing
		CLEyeCameraStart(_cam);

		CvMemStorage* storage = cvCreateMemStorage(0);
		
        IplImage* hsv_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3);
        IplImage* thresholded   = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 1);
		IplImage* temp = cvCreateImage(cvSize(pCapImage->width >> 1, pCapImage->height >> 1), IPL_DEPTH_8U, 3);

        // Create a window in which the captured images will be presented
        cvNamedWindow( "Camera" , CV_WINDOW_AUTOSIZE );
        cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
        cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
 
        
 
        //int hl = 100, hu = 115, sl = 95, su = 135, vl = 115, vu = 200;
        int hl = 5, hu = 75, sl = 40, su = 245, vl = 105, vu = 175;
        

		// image capturing loop
		while(_running)
		{

            // Detect a red ball
            CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
            CvScalar hsv_max = cvScalar(hu, su, vu, 0);

			cvGetImageRawData(pCapImage, &pCapBuffer);
			CLEyeCameraGetFrame(_cam, pCapBuffer);

			cvConvertImage(pCapImage, hsv_frame);

            // Get one frame
            if( !pCapImage )
            {
                    fprintf( stderr, "ERROR: frame is null...\n" );
                    getchar();
                    break;
            }
 
                // Covert color space to HSV as it is much easier to filter colors in the HSV color-space.
                cvCvtColor(pCapImage, hsv_frame, CV_RGB2HSV);
                // Filter out colors which are out of range.
                cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
 
                // Memory for hough circles
                CvMemStorage* storage = cvCreateMemStorage(0);
                // hough detector works better with some smoothing of the image
                cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
                CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                                thresholded->height/4, 100, 50, 10, 400);
 
                for (int i = 0; i < circles->total; i++)
                {
                    float* p = (float*)cvGetSeqElem( circles, i );
                    //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
                    cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                            3, CV_RGB(0,255,0), -1, 8, 0 );
                    cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                            cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
                }
 
                cvShowImage( "Camera", pCapImage ); // Original stream with detected ball overlay
                cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
                cvShowImage( "EdgeDetection", thresholded ); // The stream after color filtering
 
                cvReleaseMemStorage(&storage);
 
                // Do not release the frame!
 
                //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
                //remove higher bits using AND operator
                int key = cvWaitKey(10);
                
                

                switch(key){
                    case 'q' : hu += 5; break;
                    case 'Q' : hu -= 5; break;
                    
                    case 'a' : hl -= 5; break;
                    case 'A' : hl += 5; break;
                    
                    case 'w' : su += 5; break;
                    case 'W' : su -= 5; break;
                    
                    case 's' : sl -= 5; break;
                    case 'S' : sl += 5; break;
                    
                    case 'e' : vu += 5; break;
                    case 'E' : vu -= 5; break;
                    
                    case 'd' : vl -= 5; break;
                    case 'D' : vl += 5; break;
                }

                if (key != -1){
                    printf("H: %i, S: %i, V: %i\nH: %i, S: %i, V: %i\n\n", hu, su, vu, hl, sl, vl);
                }
            
 

			
		}
		cvReleaseImage(&temp);
		cvReleaseImage(&pCapImage);

		// Stop camera capture
		CLEyeCameraStop(_cam);
		// Destroy camera object
		CLEyeDestroyCamera(_cam);
		// Destroy the allocated OpenCV image
		cvReleaseImage(&pCapImage);
		_cam = NULL;
	}
示例#6
0
// Specialization: double -> int by rounding to the nearest integer via
// cvRound (no range clamping is performed in this specialization).
template<> inline int saturate_cast<int>(double v)           { return cvRound(v); }
示例#7
0
// Specialization: double -> unsigned by rounding via cvRound. NOTE(review):
// no clamping here — a negative input rounds to a negative int whose
// conversion to unsigned wraps around; confirm callers never pass negatives.
template<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }
示例#8
0
//
// Transform
// Transform the  sample 'in place'
//
// DirectShow transform hook for the CamShift + Kalman tracker.  The sample
// buffer is wrapped in an IplImage header without copying pixels, so all
// drawing below modifies the sample that continues down the filter graph.
// State machine driven by the IsInit / IsTracking members:
//   - !IsTracking && !IsInit : only draw the selection rectangle (m_params).
//   - !IsTracking &&  IsInit : seed m_object from m_params, run one
//                              ApplyCamShift(.., true) pass, start tracking.
//   -  IsTracking            : Kalman time-update predicts the object centre,
//                              ApplyCamShift(.., false) refines, overlays drawn.
HRESULT CKalmTrack::Transform(IMediaSample *pSample)
{
    BYTE*   pData;   // raw pixel buffer of the media sample
    CvImage image;   // non-owning OpenCV header wrapped around pData
    
    pSample->GetPointer(&pData);
    
    // Frame geometry comes from the input pin's current media type.
    AM_MEDIA_TYPE* pType = &m_pInput->CurrentMediaType();
    VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pType->pbFormat;
    
    // Get the image properties from the BITMAPINFOHEADER
    CvSize size = cvSize( pvi->bmiHeader.biWidth, pvi->bmiHeader.biHeight );
    int stride = (size.width * 3 + 3) & -4;   // rows padded to 4-byte multiples

    cvInitImageHeader( &image, size, IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 );
    cvSetImageData( &image, pData,stride );

    if(IsTracking == false)
    {
        if(IsInit == false)
        {
            CvPoint p1, p2;
            // Draw box
            // m_params stores the selection as fractions of the frame size.
            p1.x = cvRound( size.width * m_params.x );
            p1.y = cvRound( size.height * m_params.y );

            p2.x = cvRound( size.width * (m_params.x + m_params.width));
            p2.y = cvRound( size.height * (m_params.y + m_params.height));

            CheckBackProject( &image );

            cvRectangle( &image, p1, p2, -1, 1 );
        }
        else
        {
            // Convert the fractional selection into pixel coordinates and run
            // CamShift once.  NOTE(review): the `true` flag presumably
            // (re)initialises the tracking model — confirm in ApplyCamShift.
            m_object.x = cvRound( size.width * m_params.x );
            m_object.y = cvRound( size.height * m_params.y );
            m_object.width = cvRound( size.width * m_params.width );
            m_object.height = cvRound( size.height * m_params.height );
            ApplyCamShift( &image, true );

            CheckBackProject( &image );

            IsTracking = true;
        }
    }
    else
    {
        // Predict: Kalman time update supplies the a-priori object centre
        // (PriorState[0]/[2] are x/y here).
        cvKalmanUpdateByTime(Kalman);
        m_object.x = cvRound( Kalman->PriorState[0]-m_object.width*0.5);
        m_object.y = cvRound( Kalman->PriorState[2]-m_object.height*0.5 );
        
        // Refine the predicted window with CamShift (no re-initialisation).
        ApplyCamShift( &image, false );

        CheckBackProject( &image );

        // Tracked-object rectangle.
        cvRectangle( &image,
                     cvPoint( m_object.x, m_object.y ),
                     cvPoint( m_object.x + m_object.width, m_object.y + m_object.height ),
                     -1, 1 );

        // On-screen indicator bars: m_X length tracks the x position,
        // m_Y length tracks the y position (both scaled to 50 px max).
        Rectang(&image,m_Indicat1,-1);
        m_X.x = 10;
        m_X.y = 10;
        m_X.width=50*m_Old.x/size.width;
        m_X.height =10;
        Rectang(&image,m_X,CV_RGB(0,0,255));
        m_Y.x = 10;
        m_Y.y = 10;
        m_Y.width=10;
        m_Y.height = 50*m_Old.y/size.height;
        Rectang(&image,m_Y,CV_RGB(255,0,0));
        // 50x50 velocity-indicator box in the bottom-left corner.
        m_Indicat2.x = 0; 
        m_Indicat2.y = size.height-50;
        m_Indicat2.width = 50;
        m_Indicat2.height = 50;
        Rectang(&image,m_Indicat2,-1);
        // Draw the measured velocity as a fixed-length (12 px) direction
        // arrow; components with |v| <= 5 are suppressed as noise, which also
        // guards the division by Norm (both components zero => no division).
        float Norm = cvSqrt(Measurement[1]*Measurement[1]+Measurement[3]*Measurement[3]);
        int VXNorm = (fabs(Measurement[1])>5)?(int)(12*Measurement[1]/Norm):0;
        int VYNorm = (fabs(Measurement[3])>5)?(int)(12*Measurement[3]/Norm):0;
        CvPoint pp1 = {25,size.height-25};
        CvPoint pp2 = {25+VXNorm,size.height-25+VYNorm};
        cvLine(&image,pp1,pp2,CV_RGB(0,0,0),3);
        /*CvPoint pp1 = {25,size.height-25};
        double angle = atan2( Measurement[3], Measurement[1] );
        CvPoint pp2 = {cvRound(25+12*cos(angle)),cvRound(size.height-25-12*sin(angle))};
        cvLine(&image,pp1,pp2,0,3);*/
    }

    // Detach the header from the sample buffer (the header owns no pixels).
    cvSetImageData( &image, 0, 0 );

    return NOERROR;
} // Transform
示例#9
0
// This function is copied from http://mehrez.kristou.org/opencv-change-contrast-and-brightness-of-an-image/
// Return a new copy of this image with contrast and brightness adjusted via
// a 256-entry lookup table (algorithm by Werner D. Streidt, see below).
// Both parameters are clamped to [-100, 100].  Three-channel images are
// processed per colour plane; single-channel images are mapped directly.
// Fixes over the original: the 3-channel path no longer allocates and
// discards an unused grayscale conversion, and the dead histogram locals
// (hist_size, range_0, ranges) are removed.
boost::shared_ptr< Image > Image::ContrastBrightness( int contrast, int brightness ) const
{
	// Clamp both parameters to the supported range.
	if(contrast > 100) contrast = 100;
	if(contrast < -100) contrast = -100;
	if(brightness > 100) brightness = 100;
	if(brightness < -100) brightness = -100;

	// Lookup table mapping input intensity -> adjusted intensity.
	uchar lut[256];
	CvMat* lut_mat;
	int i;

	IplImage * dest = cvCloneImage(this);

	// Wrap `lut` in a 1x256 CvMat header so cvLUT can consume it.
	lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 );
	cvSetData( lut_mat, lut, 0 );
	/*
     * The algorithm is by Werner D. Streidt
     * (http://visca.com/ffactory/archives/5-99/msg00021.html)
     */
	if( contrast > 0 )
    {
        double delta = 127.* contrast/100;
        double a = 255./(255. - delta*2);
        double b = a*(brightness - delta);
        for( i = 0; i < 256; i++ )
        {
            int v = cvRound(a*i + b);

            if( v < 0 )
                v = 0;
            if( v > 255 )
                v = 255;
            lut[i] = v;
        }
    }
    else
    {
        double delta = -128.* contrast/100;
        double a = (256.-delta*2)/255.;
        double b = a* brightness + delta;
        for( i = 0; i < 256; i++ )
        {
            int v = cvRound(a*i + b);
            if( v < 0 )
                v = 0;

            if( v > 255 )
                v = 255;
            lut[i] = v;
        }
    }
	if (this->nChannels ==3)
	{
		// Apply the LUT to each colour plane independently, then recombine.
		IplImage * R = cvCreateImage(cvGetSize(this),this->depth,1);
		IplImage * G = cvCreateImage(cvGetSize(this),this->depth,1);
		IplImage * B = cvCreateImage(cvGetSize(this),this->depth,1);
		cvCvtPixToPlane(this,R,G,B,NULL);
		cvLUT( R, R, lut_mat );
		cvLUT( G, G, lut_mat );
		cvLUT( B, B, lut_mat );
		cvCvtPlaneToPix(R,G,B,NULL,dest);
		cvReleaseImage(&R);
		cvReleaseImage(&G);
		cvReleaseImage(&B);
	}
	else
	{
		// Single-channel source: run the LUT on a clone of the source into
		// the destination (preserves the original behaviour exactly).
		IplImage * GRAY = cvCloneImage(this);
		cvLUT( GRAY, dest, lut_mat );
		cvReleaseImage(&GRAY);
	}
	cvReleaseMat( &lut_mat);
	
	// `true` transfers ownership of `dest` to the new Image wrapper
	// (NOTE(review): presumed from the original code — confirm Image ctor).
	return boost::shared_ptr< Image >( new Image( dest, true ) );
}
// Segment the pupil, iris and eyelids in the preprocessed eye image and draw
// the detected circles into `dst`.  Works entirely on file-scope images
// (smooth, pupil, pedge, iris, iedge, eyelid_mask, iris_mask, dst, res, mask,
// src) and updates the global pupil/iris parameters xp/yp/rp, xi/yi/ri and
// the ROI origin xroi/yroi.
// Fix over the original: the histogram, the histogram image and both Hough
// memory storages are now released, so repeated calls no longer leak.
void process_image(){

    ///////////////////////////////////////////////////////
    //////////////////// PUPIL/////////////////////////////
    ///////////////////////////////////////////////////////

    int numBins = 256;
    float range[] = {0, 255};
    float *ranges[] = { range };

    CvHistogram *hist = cvCreateHist(1, &numBins, CV_HIST_ARRAY, ranges, 1);
    cvClearHist(hist);

    // Intensity histogram of the smoothed image (debug visualisation only).
    cvCalcHist(&smooth, hist, 0, 0);
    IplImage* imgHist = DrawHistogram(hist,1,1);
    cvClearHist(hist);
    //cvShowImage("hist", imgHist);

    // Dark pixels (< 50) are pupil candidates; edge map feeds Hough below.
    cvThreshold(smooth,pupil,50,255,CV_THRESH_BINARY);
    //cvShowImage( "pupi_binary",pupil);
    cvCanny(pupil,pedge,40,50);
    //cvShowImage( "pupil_edge",pedge);

    //////////////////////////////////////////////////////////
    //////////////////////IRIS////////////////////////////////
    //////////////////////////////////////////////////////////

    cvThreshold(smooth,iris,100,255,CV_THRESH_BINARY); //115
    //cvShowImage( "iris_binary",iris);
    cvCanny(iris,iedge,1,255);
    //cvShowImage( "iris_edge",iedge);

    /////////////////////////////////////////////////////////
    ///////////////////////Eyelids///////////////////////////
    /////////////////////////////////////////////////////////

    // Otsu threshold then invert: bright skin/eyelid regions become the mask.
    cvThreshold(smooth,eyelid_mask,150,255,CV_THRESH_OTSU);
    cvNot(eyelid_mask,eyelid_mask);
    //cvShowImage("eyelid",eyelid_mask);

    // Hough transform for the pupil circle on the pupil edge map.
    CvMemStorage* storage_pupil = cvCreateMemStorage(0);

    CvSeq* presults = cvHoughCircles(pedge,storage_pupil,CV_HOUGH_GRADIENT,2,src->width,255,1);
    for( int i = 0; i < presults->total; i++ )
    {
        float* p = (float*) cvGetSeqElem( presults, i );
        CvPoint pt = cvPoint( cvRound( p[0] ),cvRound( p[1] ) );

        // Record pupil centre and radius in the globals.
        xp=cvRound( p[0] );
        yp=cvRound( p[1] );
        rp=p[2];

        cvCircle(dst,pt,cvRound( p[2] ),CV_RGB(0,255,255),1,400);

        // Centre a (2*shift)x(2*shift) search window on the pupil for the
        // subsequent iris detection.
        xroi= xp-shift;
        yroi= yp-shift;

        cvRectangle(dst,cvPoint(( p[0] )-shift,p[1]-shift),cvPoint(( p[0] )+shift,p[1]+shift),CV_RGB(255,0,255), 1);

        CvRect roi= cvRect(xroi  ,yroi,shift*2,shift*2);

        cvSetImageROI( iedge, roi );
        //cvShowImage("ROI",iedge);
    }

    // Hough transform for the iris circle, restricted to the ROI set above;
    // detected coordinates are shifted back to full-image space via xroi/yroi.
    CvMemStorage* storage_iris = cvCreateMemStorage(0);

    CvSeq* iresults = cvHoughCircles(iedge,storage_iris,CV_HOUGH_GRADIENT,2,src->width,1,50,50);
    for( int i = 0; i < iresults->total; i++ )
    {
        float* p = (float*) cvGetSeqElem( iresults, i );

        CvPoint pt = cvPoint( cvRound( p[0] )+xroi,cvRound( p[1] )+yroi );
        cvCircle(dst,pt,cvRound( p[2] ),CV_RGB(255,0,0),1,400);

        xi=cvRound( p[0] )+xroi;
        yi=cvRound( p[1] )+yroi;
        ri=(p[2]);

        // Filled white disc marks the iris region in the mask.
        cvCircle(iris_mask,pt,cvRound( p[2] ),CV_RGB(255, 255, 255),-1, 8, 0);
        //cvShowImage("iris_mask",iris_mask);
    }

    cvResetImageROI(iedge);

    // Keep only the iris disc, then remove eyelid-covered areas.
    cvAnd(dst,dst,res,iris_mask);
    //cvShowImage("iris_mask",res);

    cvAnd(res,res, mask, eyelid_mask);
    //cvShowImage("Mask",mask);

    //cvLogPolar(mask,finalres,cvPoint2D32f (xp,yp),100, CV_INTER_LINEAR  );
    //cvShowImage("Final Result",finalres);

    // Release per-call allocations (previously leaked on every invocation).
    // NOTE(review): assumes DrawHistogram returns a freshly allocated image —
    // confirm against its definition.
    cvReleaseImage(&imgHist);
    cvReleaseHist(&hist);
    cvReleaseMemStorage(&storage_pupil);
    cvReleaseMemStorage(&storage_iris);
}
示例#11
0
// Specialised Hough line transform — accumulator-fill stage.
// Unlike the stock cvHoughLines2, each candidate edge pixel is additionally
// gated by its gradient direction (dx/dy), so only edges oriented roughly as
// expected for the sought line vote.  Angles are limited to
// [theta_min, theta_min + numangle*theta) and sin/cos are precomputed as
// 10-bit fixed-point values scaled by 1/rho.
// NOTE(review): this chunk of the file ends after stage 1 (accumulator fill);
// the peak-extraction stage that produces the returned CvLinePolar is not
// visible here.
DMZ_INTERNAL CvLinePolar llcv_hough(const CvArr *src_image, IplImage *dx, IplImage *dy, float rho, float theta, int threshold, float theta_min, float theta_max, bool vertical, float gradient_angle_threshold) {
    CvMat img_stub, *img = (CvMat*)src_image;
    img = cvGetMat(img, &img_stub);

    CvMat dx_stub, *dx_mat = (CvMat*)dx;
    dx_mat = cvGetMat(dx_mat, &dx_stub);

    CvMat dy_stub, *dy_mat = (CvMat*)dy;
    dy_mat = cvGetMat(dy_mat, &dy_stub);

    // Validate inputs: 8-bit single-channel mask, positive quantisation
    // steps, and a non-empty angle range.
    if(!CV_IS_MASK_ARR(img)) {
      CV_Error(CV_StsBadArg, "The source image must be 8-bit, single-channel");
    }

    if(rho <= 0 || theta <= 0 || threshold <= 0) {
      CV_Error(CV_StsOutOfRange, "rho, theta and threshold must be positive");
    }

    if(theta_max < theta_min + theta) {
      CV_Error(CV_StsBadArg, "theta + theta_min (param1) must be <= theta_max (param2)");
    }

    cv::AutoBuffer<int> _accum;
    cv::AutoBuffer<int> _tabSin, _tabCos;

    const uchar* image;
    int step, width, height;
    int numangle, numrho;
    float ang;
    int r, n;
    int i, j;
    float irho = 1 / rho;
    float scale;

    CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );

    image = img->data.ptr;
    step = img->step;
    width = img->cols;
    height = img->rows;

    const uint8_t *dx_mat_ptr = (uint8_t *)(dx_mat->data.ptr);
    int dx_step = dx_mat->step;
    const uint8_t *dy_mat_ptr = (uint8_t *)(dy_mat->data.ptr);
    int dy_step = dy_mat->step;

    // Accumulator dimensions: one row per quantised angle, one column per
    // quantised rho, with a one-cell border on every side.
    numangle = cvRound((theta_max - theta_min) / theta);
    numrho = cvRound(((width + height) * 2 + 1) / rho);

    _accum.allocate((numangle+2) * (numrho+2));
    _tabSin.allocate(numangle);
    _tabCos.allocate(numangle);
    int *accum = _accum;
    int *tabSin = _tabSin, *tabCos = _tabCos;
    
    memset(accum, 0, sizeof(accum[0]) * (numangle + 2) * (numrho + 2));

// Trig tables use 10-bit fixed point (values pre-scaled by 1/rho).
#define FIXED_POINT_EXPONENT 10
#define FIXED_POINT_MULTIPLIER (1 << FIXED_POINT_EXPONENT)

    for(ang = theta_min, n = 0; n < numangle; ang += theta, n++) {
        tabSin[n] = (int)floorf(FIXED_POINT_MULTIPLIER * sinf(ang) * irho);
        tabCos[n] = (int)floorf(FIXED_POINT_MULTIPLIER * cosf(ang) * irho);
    }

    // Gradient-direction gate: accept pixels whose dy/dx slope lies within a
    // +/- gradient_angle_threshold wedge around 180 deg (vertical case) or
    // 90 deg (horizontal case).
    float slope_bound_a, slope_bound_b;
    if(vertical) {
        slope_bound_a = tanf((float)TO_RADIANS(180 - gradient_angle_threshold));
        slope_bound_b = tanf((float)TO_RADIANS(180 + gradient_angle_threshold));
    } else {
        slope_bound_a = tanf((float)TO_RADIANS(90 - gradient_angle_threshold));
        slope_bound_b = tanf((float)TO_RADIANS(90 + gradient_angle_threshold));
    }

    // stage 1. fill accumulator
    for(i = 0; i < height; i++) {
        int16_t *dx_row_ptr = (int16_t *)(dx_mat_ptr + i * dx_step);
        int16_t *dy_row_ptr = (int16_t *)(dy_mat_ptr + i * dy_step);
        for(j = 0; j < width; j++) {
            if(image[i * step + j] != 0) {
                int16_t del_x = dx_row_ptr[j];
                int16_t del_y = dy_row_ptr[j];

                bool use_pixel = false;

                if(dmz_likely(del_x != 0)) { // avoid div by 0
                  float slope = (float)del_y / (float)del_x;
                  if(vertical) {
                    if(slope >= slope_bound_a && slope <= slope_bound_b) {
                      use_pixel = true;
                    }
                  } else {
                    // The 90-deg wedge wraps through infinite slope, so the
                    // acceptance region is the union of two half-lines (OR).
                    if(slope >= slope_bound_a || slope <= slope_bound_b) {
                      use_pixel = true;
                    }
                  }
                } else {
                  // del_x == 0: infinite slope; usable only for the
                  // horizontal (non-vertical) case.
                  use_pixel = !vertical;
                }

                if(use_pixel) {
                    // Vote for every angle: r = x*cos + y*sin in fixed point,
                    // shifted so negative rho maps into the array.
                    for(n = 0; n < numangle; n++) {
                        r = (j * tabCos[n] + i * tabSin[n]) >> FIXED_POINT_EXPONENT;
                        r += (numrho - 1) / 2;
                        accum[(n+1) * (numrho+2) + r+1]++;
                    }
                }
            }
        }
    }
示例#12
0
// GTK mouse callback for a HighGUI window: translates GDK motion and button
// events into CV_EVENT_* codes plus CV_EVENT_FLAG_* modifier bits, rescales
// the coordinates when the displayed image is a scaled copy, and forwards
// the result to the window's registered on_mouse handler.
// Always returns FALSE so GTK continues normal event propagation.
static gboolean icvOnMouse( GtkWidget *widget, GdkEvent *event, gpointer user_data )
{
    // TODO move this logic to CvImageWidget
    CvWindow* window = (CvWindow*)user_data;
    CvPoint2D32f pt32f(-1., -1.);
    CvPoint pt(-1,-1);
    int cv_event = -1, state = 0;   // cv_event stays -1 if no translatable event
    CvImageWidget * image_widget = CV_IMAGE_WIDGET( widget );

    // Ignore events for stale/foreign windows or when no callback is set.
    if( window->signature != CV_WINDOW_MAGIC_VAL ||
        window->widget != widget || !window->widget ||
        !window->on_mouse /*|| !image_widget->original_image*/)
        return FALSE;

    if( event->type == GDK_MOTION_NOTIFY )
    {
        GdkEventMotion* event_motion = (GdkEventMotion*)event;

        cv_event = CV_EVENT_MOUSEMOVE;
        pt32f.x = cvRound(event_motion->x);
        pt32f.y = cvRound(event_motion->y);
        state = event_motion->state;
    }
    else if( event->type == GDK_BUTTON_PRESS ||
             event->type == GDK_BUTTON_RELEASE ||
             event->type == GDK_2BUTTON_PRESS )
    {
        GdkEventButton* event_button = (GdkEventButton*)event;
        pt32f.x = cvRound(event_button->x);
        pt32f.y = cvRound(event_button->y);


        // Map button number (1/2/3 = left/middle/right) and press kind to
        // the matching CV_EVENT_* constant; any other button maps to 0.
        if( event_button->type == GDK_BUTTON_PRESS )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONDOWN :
                       event_button->button == 2 ? CV_EVENT_MBUTTONDOWN :
                       event_button->button == 3 ? CV_EVENT_RBUTTONDOWN : 0;
        }
        else if( event_button->type == GDK_BUTTON_RELEASE )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONUP :
                       event_button->button == 2 ? CV_EVENT_MBUTTONUP :
                       event_button->button == 3 ? CV_EVENT_RBUTTONUP : 0;
        }
        else if( event_button->type == GDK_2BUTTON_PRESS )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONDBLCLK :
                       event_button->button == 2 ? CV_EVENT_MBUTTONDBLCLK :
                       event_button->button == 3 ? CV_EVENT_RBUTTONDBLCLK : 0;
        }
        state = event_button->state;
    }

    if( cv_event >= 0 ){
        // scale point if image is scaled
        if( (image_widget->flags & CV_WINDOW_AUTOSIZE)==0 &&
             image_widget->original_image &&
             image_widget->scaled_image ){
            // image origin is not necessarily at (0,0)
            int x0 = (widget->allocation.width - image_widget->scaled_image->cols)/2;
            int y0 = (widget->allocation.height - image_widget->scaled_image->rows)/2;
            // Map widget coordinates back into original-image coordinates.
            pt.x = cvRound( ((pt32f.x-x0)*image_widget->original_image->cols)/
                                            image_widget->scaled_image->cols );
            pt.y = cvRound( ((pt32f.y-y0)*image_widget->original_image->rows)/
                                            image_widget->scaled_image->rows );
        }
        else{
            pt = cvPointFrom32f( pt32f );
        }

//        if((unsigned)pt.x < (unsigned)(image_widget->original_image->width) &&
//           (unsigned)pt.y < (unsigned)(image_widget->original_image->height) )
        {
            // Pack keyboard/button modifier state into CV_EVENT_FLAG_* bits.
            int flags = (state & GDK_SHIFT_MASK ? CV_EVENT_FLAG_SHIFTKEY : 0) |
                (state & GDK_CONTROL_MASK ? CV_EVENT_FLAG_CTRLKEY : 0) |
                (state & (GDK_MOD1_MASK|GDK_MOD2_MASK) ? CV_EVENT_FLAG_ALTKEY : 0) |
                (state & GDK_BUTTON1_MASK ? CV_EVENT_FLAG_LBUTTON : 0) |
                (state & GDK_BUTTON2_MASK ? CV_EVENT_FLAG_MBUTTON : 0) |
                (state & GDK_BUTTON3_MASK ? CV_EVENT_FLAG_RBUTTON : 0);
            window->on_mouse( cv_event, pt.x, pt.y, flags, window->on_mouse_param );
        }
    }

        return FALSE;
    }
  /// Run face detection (and nested eye detection) on an incoming ROS image.
  /// Draws the detections on the frame, publishes the annotated image on
  /// img_pub_ and an opencv_apps::FaceArrayStamped on msg_pub_.  OpenCV
  /// exceptions are caught and logged rather than propagated.
  /// Fix over the original: the unused local `int c` holding cv::waitKey's
  /// return value is removed (the call itself is kept to pump GUI events).
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#ifndef CV_VERSION_EPOCH
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t i = 0; i < faces.size(); i++ )
      {
        // Face centre in full-frame coordinates; drawn as an ellipse.
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#ifndef CV_VERSION_EPOCH
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          // Eye coordinates are relative to the face ROI: offset by the face.
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        cv::waitKey(1);   // let HighGUI process window events
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
示例#14
0
文件: testseq.cpp 项目: DevShah/18551
/* Create a test sequence from a config file plus a list of video entry
 * names.  Reads every requested video description into pTS->pElemList,
 * computes the overall frame size and frame count, and allocates the
 * composite output image and mask.  Returns NULL on failure.
 * Fix over the original: when only one of the two initial allocations
 * succeeds (pTS or fs), the successful one is released instead of leaked. */
CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale, int noise_type, double noise_ampl)
{
    int             size = sizeof(CvTestSeq_);
    CvTestSeq_*     pTS = (CvTestSeq_*)cvAlloc(size);
    CvFileStorage*  fs = cvOpenFileStorage( pConfigfile, NULL, CV_STORAGE_READ);
    int         i;

    if(pTS == NULL || fs == NULL)
    {   /* Release whichever allocation succeeded before bailing out: */
        if(fs) cvReleaseFileStorage(&fs);
        if(pTS) cvFree(&pTS);
        return NULL;
    }
    memset(pTS,0,size);

    pTS->pFileStorage = fs;
    pTS->noise_ampl = noise_ampl;
    pTS->noise_type = noise_type;
    pTS->IVar_DI = 0;
    pTS->ObjNum = 0;

    /* Read all videos: */
    for (i=0; i<numvideo; ++i)
    {
        CvTestSeqElem*  pElemNew = icvTestSeqReadElemAll(pTS, fs, videos[i]);

        /* Append the new elements at the tail of the list: */
        if(pTS->pElemList==NULL)pTS->pElemList = pElemNew;
        else
        {
            CvTestSeqElem* p = NULL;
            for(p=pTS->pElemList;p->next;p=p->next) {}
            p->next = pElemNew;
        }
    }   /* Read all videos. */

    {   /* Calculate elements and image size and video length: */
        CvTestSeqElem*  p = pTS->pElemList;
        int             num = 0;
        CvSize          MaxSize = {0,0};
        int             MaxFN = 0;

        for(p = pTS->pElemList; p; p=p->next, num++)
        {
            int     FN = p->FrameBegin+p->FrameNum;
            CvSize  S = {0,0};

            /* Only background elements contribute to the frame size. */
            if(p->pImg && p->BG)
            {
                S.width = p->pImg->width;
                S.height = p->pImg->height;
            }

            if(MaxSize.width < S.width) MaxSize.width = S.width;
            if(MaxSize.height < S.height) MaxSize.height = S.height;
            if(MaxFN < FN)MaxFN = FN;
        }

        pTS->ListNum = num;

        /* Fall back to QVGA when no element defined a size: */
        if(MaxSize.width == 0)MaxSize.width = 320;
        if(MaxSize.height == 0)MaxSize.height = 240;

        MaxSize.width = cvRound(Scale*MaxSize.width);
        MaxSize.height = cvRound(Scale*MaxSize.height);

        pTS->pImg = cvCreateImage(MaxSize,IPL_DEPTH_8U,3);
        pTS->pImgMask = cvCreateImage(MaxSize,IPL_DEPTH_8U,1);
        pTS->FrameNum = MaxFN;

        /* Elements without an explicit length run for the whole sequence: */
        for(p = pTS->pElemList; p; p=p->next)
        {
            if(p->FrameNum<=0)p->FrameNum=MaxFN;
        }
    }   /* Calculate elements and image size. */

    return (CvTestSeq*)pTS;

}   /* cvCreateTestSeq */
示例#15
0
// Saturating conversion double -> short: round to the nearest integer, then
// clamp through the int -> short specialization.
template<> inline short saturate_cast<short>(double v)
{
    const int rounded = cvRound(v);
    return saturate_cast<short>(rounded);
}
示例#16
0
// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
//
// Strategy: threshold eroded/dilated copies of the image at several levels,
// extract contours, collect quadrangle hypotheses from both white and black
// squares, then look for a large group of similarly sized quads whose
// black/white counts roughly match the expected board layout.
int cvCheckChessboard(IplImage* src, CvSize size)
{
    if(src->nChannels > 1)
    {
        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
                __FILE__, __LINE__);
    }

    if(src->depth != 8)
    {
        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
                __FILE__, __LINE__);
    }

    // Tuning constants for the threshold sweep below.
    const int erosion_count = 1;
    const float black_level = 20.f;
    const float white_level = 130.f;
    const float black_white_gap = 70.f;

#if defined(DEBUG_WINDOWS)
    cvNamedWindow("1", 1);
    cvShowImage("1", src);
    cvWaitKey(0);
#endif //DEBUG_WINDOWS

    CvMemStorage* storage = cvCreateMemStorage();

    // `white` is eroded (shrinks bright regions), `black` is dilated
    // (shrinks dark regions) — separates adjacent board squares.
    IplImage* white = cvCloneImage(src);
    IplImage* black = cvCloneImage(src);

    cvErode(white, white, NULL, erosion_count);
    cvDilate(black, black, NULL, erosion_count);
    IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    int result = 0;
    // Sweep the threshold level until a plausible board is found.
    for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
    {
        // White squares: bright blobs above (threshold + gap).
        cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);

#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS

        CvSeq* first = 0;
        std::vector<std::pair<float, int> > quads;   // (size, class) per quad
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 1);

        // Black squares: dark blobs below the threshold (inverted binary).
        cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);

#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS

        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 0);

        const size_t min_quads_count = size.width*size.height/2;
        std::sort(quads.begin(), quads.end(), less_pred);

        // now check if there are many hypotheses with similar sizes
        // do this by floodfill-style algorithm
        const float size_rel_dev = 0.4f;

        for(size_t i = 0; i < quads.size(); i++)
        {
            // Grow a run [i, j) of quads within +40% of quads[i]'s size.
            size_t j = i + 1;
            for(; j < quads.size(); j++)
            {
                if(quads[j].first/quads[i].first > 1.0f + size_rel_dev)
                {
                    break;
                }
            }

            if(j + 1 > min_quads_count + i)
            {
                // check the number of black and white squares
                std::vector<int> counts;
                countClasses(quads, i, j, counts);
                const int black_count = cvRound(ceil(size.width/2.0)*ceil(size.height/2.0));
                const int white_count = cvRound(floor(size.width/2.0)*floor(size.height/2.0));
                // Require at least 75% of the expected squares of each colour.
                if(counts[0] < black_count*0.75 ||
                        counts[1] < white_count*0.75)
                {
                    continue;
                }
                result = 1;
                break;
            }
        }
    }


    cvReleaseImage(&thresh);
    cvReleaseImage(&white);
    cvReleaseImage(&black);
    cvReleaseMemStorage(&storage);

    return result;
}
示例#17
0
// Saturating conversion float -> int: rounds to the nearest integer; the
// whole int range is representable, so no clamping is required.
template<> inline int saturate_cast<int>(float v)
{
    const int rounded = cvRound(v);
    return rounded;
}
// Detect faces and, inside each face, eyes in `img`.  Detection runs on a
// grayscale copy downscaled by `scale`; all reported coordinates are mapped
// back to the original resolution.  One `face` entry per detected face is
// appended to `find_faces` (face centre, eye centres, eye radii), accepted
// eyes are drawn into `img`, and the annotated image is returned.
// Fixes over the original:
//  * QVector::value(i) returns a COPY, so set_coord_face() was applied to a
//    temporary and the stored face never received its centre — use
//    operator[] instead.
//  * The eye-acceptance bounds check compared center.x against the image
//    HEIGHT; the y coordinate must be range-checked there.
Mat detect_Face_and_eyes( Mat& img, double scale, QVector <face> &find_faces)
{
    vector<Rect> faces;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;
    cvtColor( img, gray, COLOR_BGR2GRAY);
    double fx = 1 / scale;
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );
    obj.cascade.detectMultiScale( smallImg, faces,
                              1.1, 2, 0
                              |CASCADE_SCALE_IMAGE,
                              Size(30, 30) );
    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Scalar color = colors[i%8];
        int radius;

        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;   // cv::Point default-initialises to (0,0)

        face temp;
        find_faces.push_back(temp);

        // Only roughly square detections get a meaningful centre; otherwise
        // the (0,0) default is stored.
        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
        }
        smallImgROI = smallImg( r );
        obj.nestedCascade.detectMultiScale(smallImgROI, nestedObjects,
                                       1.1, 2, 0
                                       |CASCADE_SCALE_IMAGE,
                                       Size(30, 30) );
        // FIX: mutate the stored element, not a copy returned by value().
        find_faces[i].set_coord_face(center);
        QVector <Point> write_eyes_array;
        QVector <int> write_radius_eyes_array;
        for ( size_t j = 0; j < nestedObjects.size(); j++ )
        {
            Rect nr = nestedObjects[j];
            // Eye rectangle is relative to the face ROI: offset by the face.
            center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
            center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);

            radius = cvRound((nr.width + nr.height)*0.25*scale);
            // FIX: the last bound now checks center.y against the height
            // (previously center.x was compared against the height).
            if((radius>=20)&&((center.x>10)&&(center.x<img.size().width-10))&&((center.y>10)&&(center.y<img.size().height-10)))
            {
                write_radius_eyes_array.push_back(radius);
                write_eyes_array.push_back(center);
                circle(img, center, radius, color, 3, 8, 0 );
            }
        }
        find_faces[i].set_coord_eyes(write_eyes_array);
        find_faces[i].set_radius_eyes(write_radius_eyes_array);
    }
    return img;
}
示例#19
0
// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
template<> inline unsigned saturate_cast<unsigned>(float v)
{
    const int rounded = cvRound(v);
    return (unsigned)rounded;   // negatives wrap by design (see note above)
}
示例#20
0
// Consume the latest particle-filter tracker output (a 12-element vector:
// elements 0-5 describe the left image — point (0,1) and box corners (2-5) —
// elements 6-11 the right image), update the shared stereoTracker state under
// trackMutex, and draw the tracked point/box onto both camera images before
// republishing them.  Also emits the left/right tracked pixel coordinates on
// boundMILPort.
void VisuoThread::updatePFTracker()
{
    Vector *trackVec=pftInPort.read(false);   // non-blocking read; may be NULL

    Vector stereo;
    if(trackVec!=NULL && trackVec->size()==12)
    {
        //must check if the tracker has gone mad.
        if(checkTracker(trackVec))
        {
            // Publish the fresh estimate to the shared state.
            trackMutex.wait();
            stereoTracker.vec=*trackVec;
            trackMutex.post();

            // (u,v) of the tracked point in the left (0,1) and right (6,7)
            // images.
            stereo.resize(4);
            stereo[0]=stereoTracker.vec[0];
            stereo[1]=stereoTracker.vec[1];
            stereo[2]=stereoTracker.vec[6];
            stereo[3]=stereoTracker.vec[7];

            if(trackMode==MODE_TRACK_TEMPLATE)
                stereo_target.set(stereo);
        }
        else
        {
            // Tracker judged unreliable: clear the shared state.
            trackMutex.wait();
            stereoTracker.vec.clear();
            stereoTracker.side=0;
            trackMutex.post();
        }
    }



    imgMutex.wait();
    if(img[LEFT]!=NULL && img[RIGHT]!=NULL)
    {
        // Work on copies so the stored frames stay clean.
        Image drawImg[2];
        drawImg[LEFT]=*img[LEFT];
        drawImg[RIGHT]=*img[RIGHT];

        if(stereoTracker.vec.size()==12)
        {
            // Tracked point (small circle) and bounding box per eye.
            cvCircle(drawImg[LEFT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[0]),cvRound(stereoTracker.vec[1])),3,cvScalar(0,255),3);
            cvRectangle(drawImg[LEFT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[2]),cvRound(stereoTracker.vec[3])),
                                                 cvPoint(cvRound(stereoTracker.vec[4]),cvRound(stereoTracker.vec[5])),cvScalar(0,255),3);

            cvCircle(drawImg[RIGHT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[6]),cvRound(stereoTracker.vec[7])),3,cvScalar(0,255),3);
            cvRectangle(drawImg[RIGHT].getIplImage(),cvPoint(cvRound(stereoTracker.vec[8]),cvRound(stereoTracker.vec[9])),
                                                 cvPoint(cvRound(stereoTracker.vec[10]),cvRound(stereoTracker.vec[11])),cvScalar(0,255),3);

            // Emit (x, y, side) per eye as two nested lists.
            Bottle v;
            v.clear();
            Bottle &vl=v.addList();
            vl.addInt(cvRound(stereoTracker.vec[0]));
            vl.addInt(cvRound(stereoTracker.vec[1]));
            vl.addInt(stereoTracker.side);
            Bottle &vr=v.addList();
            vr.addInt(cvRound(stereoTracker.vec[6]));
            vr.addInt(cvRound(stereoTracker.vec[7]));
            vr.addInt(stereoTracker.side);

            boundMILPort.write(v);
        }


        if(newImage[LEFT])
            outPort[LEFT].write(drawImg[LEFT]);
        
        if(newImage[RIGHT])
            outPort[RIGHT].write(drawImg[RIGHT]);

        //avoid writing multiple times the same image
        newImage[LEFT]=false;
        newImage[RIGHT]=false;
    }
    imgMutex.post();
}
int main(int argc, char** argv)
{
    int ratio = 3;
    int kernel_size = 3;
    int sigma = 200;

    //connection to camera 
    CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);

    if(!capture)
    {
        std::cerr << "ERROR: capture is NULL\n";
        getchar();
        return -1;
    }

    /* ONLY NEEDED FOR TESTING PURPOSES

       cvNamedWindow("Camera Image", CV_WINDOW_AUTOSIZE);
    // Create a Trackbar for user to enter threshold
    cv::createTrackbar("Min Threshold:","Camera Image" , &lowThreshold, max_lowThreshold, DummyCallback);
    // Create a Trackbar for user to enter threshold
    cv::createTrackbar("Hough Threshold:","Camera Image" , &houghThreshold, max_houghThreshold, DummyCallback);
    // Create a Trackbar for user to enter threshold
    cv::createTrackbar("Sigma:","Camera Image" , &sigma, 1000, DummyCallback);

    cvNamedWindow("Edge Image", CV_WINDOW_AUTOSIZE);

*/

    ros::init(argc,argv, "circle_publisher");
    ros::NodeHandle nh;

    //Let's publish messages as defined in /msg/cirlce.msg on a topic called 'detected_circles' with a max. buffer of 1000 messages 
    ros::Publisher circle_publisher = nh.advertise<circle_detection::circle_msg>("detected_circles",1000); 

    ros::Rate loop_rate(10);

    //used to create an Image Id
    size_t id_count = 0;

    while(ros::ok)
    {
        // Get a frame
        IplImage* frame = cvQueryFrame(capture);

        if (!frame)
        {
            std::cerr << "ERROR: frame is null...\n" ;
            getchar();
            break;
        }

        // image id 
        id_count++;

        cv::Mat src(frame);
        cv::Mat src_gray;

        cv::Mat dst;
        cv::Mat detected_edges;

        dst.create(src.size(), src.type());

        // covert the image to gray
        cvtColor(src,src_gray,CV_BGR2GRAY);

        // Reduce the noise so we avoid false circle detection
        GaussianBlur(src_gray, detected_edges, cv::Size(9, 9), sigma/100.0, 0);

        equalizeHist(detected_edges, detected_edges);

        // Canny detector
        Canny(detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size);

        // Using Canny's output as a mask, we display our result
        dst = cv::Scalar::all(0);

        src.copyTo(dst, detected_edges);

        std::vector<Point> edgePoints;


        // iterate through the  pixels of the canny image
        for(int j=0;j<detected_edges.cols;j++)
        {
            for(int i=0;i<detected_edges.rows;i++)
            {
                unsigned char &color = *(detected_edges.data+detected_edges.step*i + j*detected_edges.elemSize());
                unsigned char &bb = *(src.data+src.step*i + j*src.elemSize());
                unsigned char &gg = *(src.data+src.step*i + j*src.elemSize() + 1);
                unsigned char &rr = *(src.data+src.step*i + j*src.elemSize() + 2);

                // check if the pixel is black or white (only edges are white)
                if(color)
                {
                    int max = max3(rr,gg,bb);
                    int min = min3(rr,gg,bb);
                    int delta = max - min;

                    // check saturation (only colorfull circles will be detacted
                    if(delta > 20)
                    {
                        edgePoints.push_back(Point((double) j,(double) i));
                    }
                    else
                    {
                        // mark pixel as no longer relevant, i.e. the pixel isn't recognized as edge
                        color = 0;
                    }
                }
            }
        }


        std::vector<Circle> detectedCircles;

        /* Apply the RANSAC algorithm to find circles in the camera image
         * Paramters: sink vector, points, eps, iterations, minSupporters, maxCircles
         */ 
        ransac(detectedCircles, edgePoints, 5.0, 10000, 100, 1);

        /* ONLY NEDDED FOR TESTIGN PURPOSES

        // Draw the circles detected
        for(size_t i = 0; i < detectedCircles.size(); i++)
        {
        cv::Point center(cvRound(detectedCircles[i].center.x), cvRound(detectedCircles[i].center.y));
        int radius = cvRound(detectedCircles[i].radius);

        //            assert(radius > 0);

        // circle center
        circle(src, center, 3, cv::Scalar(0,255,0), -1, 8, 0 );
        // circle outline
        circle(src, center, radius, cv::Scalar(0,0,255), 3, 8, 0 );
        }

        //Results
        imshow("Camera Image", src);
        imshow("Edge Image", detected_edges);

*/


        for(size_t i=0;i < detectedCircles.size();i++)
        {
            circle_detection::circle_msg msg;
            std::stringstream ss;

            ss << "IMG" << id_count;

            msg.image_id = ss.str();

            unsigned int radius = cvRound(detectedCircles[i].radius);

            msg.radius = radius;

            msg.center_x = cvRound(detectedCircles[i].center.x) ;
            msg.center_y = cvRound(detectedCircles[i].center.y);
            circle_publisher.publish(msg);

            ros::spinOnce();
            loop_rate.sleep();

        }



        //Do not release the frame!
        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
        //remove higher bits using AND operator
        //cvWaitKey() is used as delay between the frames
        if ( (cvWaitKey(100) & 255) == 27 ) break;
    }

    /* ONLY NEEDED FOR TESTING PURPOSES
     
    // Release the capture device housekeeping
       cvReleaseCapture( &capture );
       cvDestroyWindow( "Display Image" );

    */

    return 0;
}
示例#22
0
/**
 * Wait (up to motionWaitThresh seconds) for a stable motion cue seen by
 * both cameras, start the tracker on it and append the resulting stereo
 * point (uL,vL,uR,vR) to bStereo.
 *
 * @param bStereo  sink bottle; the 4 stereo coordinates are appended on success.
 * @return true when a valid left/right motion pair was found and the
 *         tracker was started, false on timeout or interruption.
 *
 * NOTE(review): this is a busy-wait — the loop polls the motion buffers
 * under motMutex until either a match is found... actually until the time
 * threshold expires (it does not break early on ok==true) — confirm that
 * re-evaluating after success is intentional.
 */
bool VisuoThread::getMotion(Bottle &bStereo)
{
    Vector stereo;

    bool ok=false;

    double t=Time::now();

    while(Time::now()-t<motionWaitThresh && !interrupted)
    {
        motMutex.wait();
        // If the buffers are sufficently dense and not so small, return true.
        double size=0.0;
        if (buffer[LEFT].size()>minMotionBufSize && buffer[RIGHT].size()>minMotionBufSize)
        {
            Vector p[2];
            for (int cam=0; cam<2; cam++)
            {
                // Mean position (u,v), mean blob size and position std-dev
                // over this camera's motion buffer.
                double size_cam,u,v,n;
                double u_std,v_std;
                size_cam=u=v=0.0;
                u_std=v_std=0.0;
                n=1.0/buffer[cam].size();

                for (unsigned int i=0; i<buffer[cam].size(); i++)
                {
                    size_cam+=buffer[cam][i].size;
                    u+=buffer[cam][i].p.x;
                    v+=buffer[cam][i].p.y;
                    u_std+=buffer[cam][i].p.x*buffer[cam][i].p.x;
                    v_std+=buffer[cam][i].p.y*buffer[cam][i].p.y;
                }

                size_cam*=n;
                u*=n;
                v*=n;
                // std = sqrt(E[x^2] - E[x]^2)
                u_std=sqrt(n*u_std-u*u);
                v_std=sqrt(n*v_std-v*v);

                //check if the motion detected point is not wildly moving
                if (u_std<motionStdThresh && v_std<motionStdThresh)
                {
                    p[cam].resize(2);
                    p[cam][0]=u;
                    p[cam][1]=v;
                }
                else
                    break;
                    // NOTE: breaking here leaves p[cam] empty (size 0) and
                    // skips the size accumulation below, so the pair is
                    // rejected by the size()==2 check further down.

                size+=size_cam;
            }

            // Tracker window side derived from the average blob area
            // (treated as a circle of area 'size').
            int side=cvRound(2*sqrt(size/3.1415)*2);

            if (p[LEFT].size()==2 && p[RIGHT].size()==2)
            {
                stereo.resize(4);
                stereo[0]=p[LEFT][0];
                stereo[1]=p[LEFT][1];
                stereo[2]=p[RIGHT][0];
                stereo[3]=p[RIGHT][1];
                
                startTracker(stereo,cvRound(side));

                ok=true;
            }
        }
        motMutex.post();
    }

    // Empty when no stable pair was found: nothing is appended.
    for(size_t i=0; i<stereo.size(); i++)
        bStereo.addDouble(stereo[i]);

    return ok;
}
示例#23
0
/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Draw the face rectangle and the four facial-component
 *              rectangles (eyes, nose, mouth) in a particular color on an image
 * @param       oImg        -- output image
 * @param       color       -- input color
 * @return      void
 * @note        The whole-face rectangle is drawn with line thickness 2,
 *              all component rectangles with thickness 1.
*/
void VO_FaceCompPos::VO_DrawRects(cv::Mat& oImg, cv::Scalar color)
{
    // Draw one rectangle given by its x/y/width/height members.
    // Generic on the rect type so the explicit cvRound() is kept for
    // floating-point rectangles, exactly as the original per-rect code did.
    auto drawRect = [&oImg, &color](const auto& r, int thickness)
    {
        cv::Point lefttop(cvRound(r.x), cvRound(r.y));
        cv::Point rightbottom(cvRound(r.x + r.width),
                              cvRound(r.y + r.height));
        cv::rectangle(oImg, lefttop, rightbottom, color, thickness, 8, 0);
    };

    drawRect(this->m_rectObject,   2);  // the face
    drawRect(this->m_rectLeftEye,  1);  // the left eye
    drawRect(this->m_rectRightEye, 1);  // the right eye
    drawRect(this->m_rectNose,     1);  // the nose
    drawRect(this->m_rectMouth,    1);  // the mouth
}
示例#24
0
/// Convert a float to uchar: round to the nearest integer, then clamp
/// into the uchar range via the int specialization.
template<> inline uchar saturate_cast<uchar>(float v)
{
    const int rounded = cvRound(v);
    return saturate_cast<uchar>(rounded);
}
示例#25
0
文件: calib.cpp 项目: mvernacc/RT
/**
 * Stereo-camera calibration pipeline (OpenCV sample style):
 *  1. detect chessboard corners in each left/right image pair,
 *  2. run stereoCalibrate on the good pairs,
 *  3. report the average epipolar-constraint error,
 *  4. save intrinsics/extrinsics to calib/intrinsics.yml and
 *     calib/extrinsics.yml,
 *  5. optionally display the rectified image pairs.
 *
 * @param imagelist      alternating left/right image file names (even count)
 * @param boardSize      inner-corner count of the chessboard (width x height)
 * @param useCalibrated  true: Bouguet rectification from the calibration;
 *                       false: Hartley rectification from the fundamental matrix
 * @param showRectified  when true, show the rectified pairs until ESC/'q'
 */
void StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }
    printf("board size: %d x %d", boardSize.width, boardSize.height);
    bool displayCorners = true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:

    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2;

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    // Corner detection pass: i indexes pairs, j counts good pairs,
    // k is 0 (left) / 1 (right).
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            // Retry detection on an upscaled image if the original fails.
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    // Map the corners found on the upscaled image back to
                    // original image coordinates.
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, CV_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            // Refine corner locations to sub-pixel accuracy on the
            // original (grayscale) image.
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                                      30, 0.01));
        }
        // k == 2 only when both images of the pair were processed fully.
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);

    // Ideal board coordinates (z = 0 plane), same for every view.
    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";

    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;

    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                    CV_CALIB_FIX_ASPECT_RATIO +
                    CV_CALIB_ZERO_TANGENT_DIST +
                    //CV_CALIB_SAME_FOCAL_LENGTH +
                    CV_CALIB_RATIONAL_MODEL +
                    CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    cout << "done with RMS error=" << rms << endl;

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        // Sum of point-to-epiline distances in both directions.
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " <<  err/npoints << endl;

    // save intrinsic parameters
    FileStorage fs("calib/intrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

    fs.open("calib/extrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    // OpenCV can handle left-right
    // or up-down camera arrangements
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

// COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
// OR ELSE HARTLEY'S METHOD
    else
 // use intrinsic parameters of each camera, but
 // compute the rectification transformation directly
 // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }

    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    // Side-by-side (horizontal stereo) or stacked (vertical stereo) canvas.
    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
            cvtColor(rimg, cimg, CV_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
            if( useCalibrated )
            {
                // Outline the valid (fully rectified) region in red.
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }

        // Epipolar guide lines: horizontal for side-by-side stereo,
        // vertical for stacked stereo.
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}
示例#26
0
/// Convert a double to schar: round to the nearest integer, then clamp
/// into the schar range via the int specialization.
template<> inline schar saturate_cast<schar>(double v)
{
    const int rounded = cvRound(v);
    return saturate_cast<schar>(rounded);
}
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:       icvCalcContrastHist8uC1R
//    Purpose:    Calculating the histogram of contrast from one-channel images
//    Context:
//    Parameters:
//    Returns:
//    Notes:      if dont_clear parameter is NULL then histogram clearing before
//                calculating (all values sets to NULL)
//F*/
/* Compute the contrast histogram of a single-channel 8-bit image.
 * For every horizontal and vertical neighbour pair, each intensity level t
 * between the pair's min and max contributes min(t-min, max-t) to the bin
 * selected by hist->chdims; bins are averaged by their hit count at the end.
 * Returns CV_NO_ERR on success, CV_NULLPTR_ERR / CV_BADSIZE_ERR /
 * CV_BADFLAG_ERR on invalid input. */
static CvStatus CV_STDCALL
icvCalcContrastHist8uC1R( uchar** img, int step, CvSize size,
                          CvHistogram* hist, int dont_clear )
{
    int i, j, t, x = 0, y = 0;
    int dims;

    if( !hist || !img )
        return CV_NULLPTR_ERR;

    dims = hist->c_dims;
    if( dims != 1 )
        return CV_BADSIZE_ERR;

    if( hist->type != CV_HIST_ARRAY )
        return CV_BADFLAG_ERR;

    for( i = 0; i < dims; i++ )
        if( !img[i] )
            return CV_NULLPTR_ERR;

    for( i = 0; i < hist->c_dims; i++ )
    {
        if( !hist->thresh[i] )
            return CV_NULLPTR_ERR;
        assert( hist->chdims[i] );
    }

    j = hist->dims[0] * hist->mdims[0];

    /* BUGFIX: allocate j counters, not hist->dims[0] — the loops below
       clear and index n over all j bins. */
    int *n = (int *)cvAlloc( (size_t)j * sizeof( int ));

    /* BUGFIX: the counters must start at zero even when dont_clear is set;
       previously n stayed uninitialized on that path and n[val0]++ below
       read garbage. */
    for( i = 0; i < j; i++ )
        n[i] = 0;

    if( hist->type == CV_HIST_ARRAY )
    {
        if( !dont_clear )
            for( i = 0; i < j; i++ )
                hist->array[i] = 0;

        switch (hist->c_dims)
        {
        case 1:
            {
                uchar *data0 = img[0];
                int *array = (int *) hist->array;
                int *chdims = hist->chdims[0];

                /* Work on integer bin values; converted back to float below. */
                for( i = 0; i < j; i++ )
                    array[i] = cvRound( hist->array[i] );

                for( y = 0; y < size.height; y++, data0 += step )
                {
                    /* BUGFIX: 'x < size.width - 1' (was '<= size.width - 1')
                       so the right neighbour data0[x + 1] never reads one
                       past the end of the row when the width is odd. */
                    for( x = 0; x < size.width - 1; x += 2 )
                    {
                        int v1_r = MIN( data0[x], data0[x + 1] );
                        int v2_r = MAX( data0[x], data0[x + 1] );

//    calculate contrast for the right-left pair 
                        for( t = v1_r; t < v2_r; t++ )
                        {
                            int val0 = chdims[t + 128];

                            array[val0] += MIN( t - v1_r, v2_r - t );
                            n[val0]++;
                        }

                        if( y < size.height - 1 )
                        {
                            int v1_d = MIN( data0[x], data0[x + step] );
                            int v2_d = MAX( data0[x], data0[x + step] );

//    calculate contrast for the top-down pair 
                            for( t = v1_d; t < v2_d; t++ )
                            {
                                int val0 = chdims[t + 128];

                                array[val0] += MIN( t - v1_d, v2_d - t );
                                n[val0]++;
                            }
                        }
                    }
                }

//  convert int to float 
                for( i = 0; i < j; i++ )
                {
                    if( n[i] != 0 )
                        hist->array[i] = (float) array[i] / n[i];
                    else
                        hist->array[i] = 0;
                }
            }
            break;
        default:
            /* BUGFIX: n was leaked on this early return. (Unreachable in
               practice since c_dims == 1 is enforced above, but kept safe.) */
            cvFree( &n );
            return CV_BADSIZE_ERR;
        }
    }

    cvFree( &n );
    return CV_NO_ERR;
}
示例#28
0
/// Convert a float to ushort: round to the nearest integer, then clamp
/// into the ushort range via the int specialization.
template<> inline ushort saturate_cast<ushort>(float v)
{
    const int rounded = cvRound(v);
    return saturate_cast<ushort>(rounded);
}
示例#29
0
int
main (int argc, char **argv)
{
	int i;
	int gui = true;
	IplImage *src_img = 0, *src_gray = 0;
	const char *cascade_name = "haarcascade_frontalface_default.xml";
	CvHaarClassifierCascade *cascade = 0;
	CvMemStorage *storage = 0;
	CvSeq *faces;
	static CvScalar colors[] = {
		{{0, 0, 255}}, {{0, 128, 255}},
		{{0, 255, 255}}, {{0, 255, 0}},
		{{255, 128, 0}}, {{255, 255, 0}},
		{{255, 0, 0}}, {{255, 0, 255}}
	};

	// (1)画像を読み込む
	if (argc < 2 || (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_COLOR)) == 0)
		return -1;
	if (argc == 3 && strcmp("--no-gui", argv[2]) == 0 )
		gui = false;
	
	src_gray = cvCreateImage (cvGetSize (src_img), IPL_DEPTH_8U, 1);

	// (2)ブーストされた分類器のカスケードを読み込む
	cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);

	// (3)メモリを確保し,読み込んだ画像のグレースケール化,ヒストグラムの均一化を行う
	storage = cvCreateMemStorage (0);
	cvClearMemStorage (storage);
	cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
	cvEqualizeHist (src_gray, src_gray);

	// (4)物体(顔)検出
	faces = cvHaarDetectObjects (src_gray, cascade, storage, 1.11, 4, 0, cvSize (40, 40));


	// puts("<faces>");
	printf("[{\"faces\":");
	// (5)検出された全ての顔位置に,円を描画する
	for (i = 0; i < (faces ? faces->total : 0); i++) {
		// puts("  <face>");
		
		CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
		CvPoint center;
		int radius;
 		// printf("    <top>%d</top>\n", r->y);
 		// printf("    <right>%d</right>\n", r->x + r->width);
 		// printf("    <bottom>%d</bottom>\n", r->y + r->height);
 		// printf("    <left>%d</left>\n", r->x);
		puts("[{");
		printf("	\"id\":%d,\n", 0);
		printf("	\"x\":%d,\n", r->x);
		printf("	\"y\":%d,\n", r->y);
		printf("	\"w\":%d,\n", r->width);
		printf("	\"h\":%d\n", r->height);
		puts("}]");
		if (i != faces->total - 1) {
			puts(",");
		}
		center.x = cvRound (r->x + r->width * 0.5);
		center.y = cvRound (r->y + r->height * 0.5);
		radius = cvRound ((r->width + r->height) * 0.25);
		cvCircle (src_img, center, radius, colors[i % 8], 3, 8, 0);
		// puts("  </face>");
	}
	// puts("</faces>");
	puts("}]");





	// (6)画像を表示,キーが押されたときに終了
	if (gui) {
		cvNamedWindow ("Face Detection", CV_WINDOW_AUTOSIZE);
		cvShowImage ("Face Detection", src_img);
		cvWaitKey (0);
	}

	cvDestroyWindow ("Face Detection");
	cvReleaseImage (&src_img);
	cvReleaseImage (&src_gray);
	cvReleaseMemStorage (&storage);

	return 0;
}
示例#30
0
/**
 * Detect straight lines in the input image via Canny + Hough, deduplicate
 * near-identical lines and store their equations (slope M, intercept B and,
 * for near-horizontal/vertical lines, a representative y/x value) in linesEq.
 *
 * @param data  expected to be an ImgData*; any other type is tracked as an
 *              error and rejected.
 * @return true when at least one new (non-duplicate) line was found.
 */
bool LineFilter::filter(Data *data){
    // check for whether the input is of the correct type.          From Albert
    ImgData* imgData = dynamic_cast<ImgData*>(data);
    if (imgData == 0) {
        // track the error and return error
        this->track(data,this->filterID,1,1);
        return false;
    }
    // NOTE(review): leftover debug display ("asdf" window) — consider
    // removing or putting behind a debug flag before release.
    cv::imshow("asdf", filter(imgData->getImg().clone(),0));

    //begin filter sequence
    int linesFound = 0;
    cv::Mat src = imgData->getImg();
    cv::Mat dst;
    cv::Mat cdst = src.clone();
    Canny(src, dst, 50, 200, 3);
    cvtColor(dst, cdst, CV_GRAY2BGR);

    std::vector<cv::Vec2f> lines;
    //detects lines
    HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
    //ending filter sequence

    //calculating the line equation
    linesEq.clear();
    float x1 = 0, x2 = 0, y1 = 0, y2 = 0;
    for( size_t i = 0; i < lines.size(); i++ ){
        float rho = lines[i][0], theta = lines[i][1];
        cv::Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        // Two points 1000px apart along the detected line.
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));

        x1 = pt1.x;
        y1 = pt1.y;
        x2 = pt2.x;
        y2 = pt2.y;
        //equation of line
        std::vector<float> eq;

        //y = mx+b
        //B MIGHT BE USELSES, NEED FURTHER TESTING
        bool safeMath = true;
        float M = 0, B = 0;
        if (x2-x1 < 5){     //straight line
            safeMath = false;
            M = INFINITY;
            B = INFINITY;
        }
        if (safeMath){      //avoid div by 0 error
            //M = (y2-y1) / (x2-x1);
            // Slope derived from the Hough angle rather than the two
            // endpoints, to avoid rounding error.
            double realtheta = (rho < 0)? theta - M_PI:theta;
            realtheta = -realtheta +  M_PI/2;
            M = tan(realtheta);
            B = y2 - M*x2;
        }

        bool repeat = false;
        // check if there is a similar line already.
        // BUGFIX(readability): the loop variable used to be named 'lines',
        // shadowing the cv::Vec2f vector above, and was copied per
        // iteration; renamed to prevEq and taken by const reference.
        for (const std::vector<float>& prevEq: linesEq){
            //vert line situations
            if (M == INFINITY && prevEq[0] == INFINITY){
                //check their x values
                if (std::abs(prevEq[2] - ((x1+x2)/2)) < maxDiff){
                    repeat = true;
                    break;
                }
            }
            //check if m is almost vertical
            else if (std::abs(M) > maxSlope && prevEq[0] == INFINITY){
                //std::cout<<"almost vert ";
                //std::cout<<std::abs(prevEq[2] - ((x1+x2)/2))<<std::endl;
                if (std::abs(prevEq[2] - ((x1+x2)/2) ) < maxDiff){
                    repeat = true;
                    break;
                }
            }
            else if (M == INFINITY && std::abs(prevEq[0])> maxSlope){
                //std::cout<<"almost vert II ";
                //std::cout<<std::abs(prevEq[2] - ((x1+x2)/2))<<std::endl;
                if (std::abs(prevEq[2] - ((x1+x2)/2) ) < maxDiff){
                    repeat = true;
                    break;
                }
            }
            //check if m is too similar or not, b is too different to check
            else if (std::abs(prevEq[0] - M) < maxDiff){
                if (M > 15){ //vertical lines
                    //check if the intersection point is near the average x
                    if (std::abs((B-prevEq[1])/(prevEq[0]-M))-(x1+x2)/2 < maxDiff){
                        repeat = true;
                        break;
                    }
                }else{      //horziontal lines
                    if (std::abs((B-prevEq[1])/(prevEq[0]-M))*M - (y1+y2)/2 < maxDiff){
                        repeat = true;
                        break;
                    }
                }
            }
        }

        if (!repeat){
            eq.push_back(M);
            eq.push_back(B);
            //std::cout<<M<<" "<<B<<" " << ((x1+x2)/2) << " ";
            if (std::abs(M) < 5){  //aprox horizontal line
                eq.push_back(y2);   //give it the y value
                //std::cout<<y2;
                //printf(" horz line");
            }
            if (std::abs(M) > maxSlope){ //vertical line
                eq.push_back(x2);   //x value
                //std::cout<<x2;
                //printf(" vertal line");
            }
            //std::cout<<std::endl;

            linesEq.push_back(eq);
            linesFound++;
            //line(*cdst, pt1, pt2, cv::Scalar(0,0,255), 3, CV_AA);     //drawing the line
        }
    }

    //should i set imgData to something?? -Carl

    //track and return
    this->track(imgData, this->filterID, 0,0);
    //println(linesFound != 0);
    return linesFound != 0;
}