void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (!image_0_)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
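
// The callback above relies on two helpers, match() and homography(), that are
// not shown in this snippet. Below is a minimal sketch of what they might look
// like, assuming a cv::DescriptorMatcher member named matcher_ and the same
// threshold_ member used for the inlier test; the real node may use a different
// matcher or add a ratio test.
void match(const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
           std::vector<cv::DMatch>& matches)
{
    matcher_->match(descriptors_0, descriptors_1, matches);
}

void homography(const std::vector<cv::KeyPoint>& keypoints_0,
                const std::vector<cv::KeyPoint>& keypoints_1,
                const std::vector<cv::DMatch>& matches, cv::Mat& H)
{
    if (matches.size() < 4)  // findHomography needs at least 4 correspondences
        return;

    std::vector<cv::Point2f> points_0, points_1;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        points_0.push_back(keypoints_0[matches[i].queryIdx].pt);
        points_1.push_back(keypoints_1[matches[i].trainIdx].pt);
    }

    // H maps points of the first image onto the second one, matching the
    // WARP_INVERSE_MAP usage in the callback above.
    H = cv::findHomography(points_0, points_1, cv::RANSAC,
                           threshold_ < 0 ? 3 : threshold_);
}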
 CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
     Detector(detector)
 {
     CV_Assert(!detector.empty());
 }
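
// The constructor above is only a fragment. A plausible completion, assuming
// the adapter implements cv::DetectionBasedTracker::IDetector and simply
// forwards detection requests to the wrapped cv::CascadeClassifier (as in the
// OpenCV detection-based-tracker sample); treat it as a sketch, not the
// original author's code.
class CascadeDetectorAdapter : public cv::DetectionBasedTracker::IDetector
{
public:
    CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector) :
        IDetector(),
        Detector(detector)
    {
        CV_Assert(!detector.empty());
    }

    // Run the cascade with the scale/size parameters inherited from IDetector.
    void detect(const cv::Mat& image, std::vector<cv::Rect>& objects)
    {
        Detector->detectMultiScale(image, objects, scaleFactor, minNeighbours,
                                   0, minObjSize, maxObjSize);
    }

    virtual ~CascadeDetectorAdapter() {}

private:
    CascadeDetectorAdapter();  // no default construction
    cv::Ptr<cv::CascadeClassifier> Detector;
};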
 int main(int argc, char* argv[]) {
     // welcome message
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
     std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
     std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
     std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
     std::cout<<"* The retina model still have the following properties:"<<std::endl;
     std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
     std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
     std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
     std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
     std::cout<<"* for more information, reer to the following papers :"<<std::endl;
     std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
     std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
     std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
     std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
     std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
     std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
     std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;

     // basic input arguments checking
     if (argc<4)
     {
         help("bad number of parameter");
         return -1;
     }

     bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing

     int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
     sscanf(argv[2], "%d", &startFrameIndex);
     sscanf(argv[3], "%d", &endFrameIndex);
     std::string inputImageNamePrototype(argv[1]);

     //////////////////////////////////////////////////////////////////////////////
     // checking input media type (still image, video file, live video acquisition)
     std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
     loadNewFrame(inputImageNamePrototype, startFrameIndex, true);

     if (inputImage.empty())
     {
        help("could not load image, program end");
            return -1;
         }

     //////////////////////////////////////////////////////////////////////////////
     // Program start in a try/catch safety context (Retina may throw errors)
     try
     {
         /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
          * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
          */
         if (useLogSampling)
         {
             retina = cv::bioinspired::createRetina(inputImage.size(), true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
         }
         else // -> else allocate "classical" retina :
             retina = cv::bioinspired::createRetina(inputImage.size());

         // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
         retina->write("RetinaDefaultParameters.xml");

         // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
         retina->activateMovingContoursProcessing(false);

         // declare retina output buffers
         cv::Mat retinaOutput_parvo;

         /////////////////////////////////////////////
         // prepare displays and interactions
         histogramClippingValue=0; // default value... updated with interface slider

         std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
         cv::namedWindow(retinaInputCorrected,1);
         cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);

         std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
         cv::namedWindow(RetinaParvoWindow, 1);
         colorSaturationFactor=3;
         cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);

         retinaHcellsGain=40;
         cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);

         localAdaptation_photoreceptors=197;
         localAdaptation_Gcells=190;
         cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
         cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);

        std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");

         /////////////////////////////////////////////
         // apply default parameters of user interaction variables
         callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
         callback_saturateColors(1, NULL);

         // processing loop with stop condition
         currentFrameIndex=startFrameIndex;
         while(currentFrameIndex <= endFrameIndex)
         {
             loadNewFrame(inputImageNamePrototype, currentFrameIndex, false);

             if (inputImage.empty())
             {
                std::cout<<"Could not load new image (index = "<<currentFrameIndex<<"), program end"<<std::endl;
                return -1;
             }
            // display input & process standard power transformation
            imshow("EXR image original image, 16bits=>8bits linear rescaling ", imageInputRescaled);
            cv::Mat gammaTransformedImage;
            cv::pow(imageInputRescaled, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
            imshow(powerTransformedInput, gammaTransformedImage);
             // run retina filter
             retina->run(imageInputRescaled);
             // Retrieve and display retina output
             retina->getParvo(retinaOutput_parvo);
             cv::imshow(retinaInputCorrected, imageInputRescaled/255.f);
             cv::imshow(RetinaParvoWindow, retinaOutput_parvo);
             cv::waitKey(4);
            // jump to next frame
            ++currentFrameIndex;
         }
     } catch (const cv::Exception& e)
     {
         std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
     }

     // Program end message
     std::cout<<"Retina demo end"<<std::endl;

     return 0;
 }
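
// The trackbar callbacks referenced above (callBack_updateRetinaParams,
// callback_saturateColors, callBack_rescaleGrayLevelMat) are not shown in this
// snippet. A minimal sketch of the first two, assuming the global retina
// pointer and slider variables declared elsewhere in the sample; the concrete
// constants are illustrative defaults.
static void callBack_updateRetinaParams(int, void*)
{
    // Push the current slider values into the Parvocellular (OPL/IPL) setup.
    retina->setupOPLandIPLParvoChannel(
            true, true,
            (float)(localAdaptation_photoreceptors / 200.0), 0.5f, 0.43f,
            (float)retinaHcellsGain, 1.f, 7.f,
            (float)(localAdaptation_Gcells / 200.0));
}

static void callback_saturateColors(int, void*)
{
    // Enable color processing and apply the slider-selected saturation factor.
    retina->setColorSaturation(true, (float)colorSaturationFactor);
}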
Example #4
void processImage(cv::Mat& image) {
    if (image.empty())
        return;

#ifdef _OPENCV3
    pMOG->apply(image, fgMaskMOG, 0.05);
#else
    pMOG->operator()(image, fgMaskMOG, 0.05);
#endif
    cv::dilate(fgMaskMOG,fgMaskMOG,cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(15,15)));

    bin = new IplImage(fgMaskMOG);
    frame = new IplImage(image);
    labelImg = cvCreateImage(cvSize(image.cols,image.rows),IPL_DEPTH_LABEL,1);

    unsigned int result = cvLabel(bin, labelImg, blobs);
    cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
    cvFilterByArea(blobs, 1500, 40000);
    cvUpdateTracks(blobs, tracks, 200., 5);
    cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID);

    for (std::map<CvID, CvTrack*>::iterator track_it = tracks.begin(); track_it!=tracks.end(); track_it++) {
        CvID id = (*track_it).first;
        CvTrack* track = (*track_it).second;
        cur_pos = track->centroid;

        if (track->inactive == 0) {
            if (last_poses.count(id)) {
                std::map<CvID, CvPoint2D64f>::iterator pose_it = last_poses.find(id);
                last_pos = pose_it -> second;
                last_poses.erase(pose_it);
            }
            last_poses.insert(std::pair<CvID, CvPoint2D64f>(id, cur_pos));
            if (line_pos+25>cur_pos.y && cur_pos.y>line_pos && line_pos-25<last_pos.y && last_pos.y<line_pos) {
                count++;
                countUD++;
            }
            if (line_pos-25<cur_pos.y && cur_pos.y<line_pos && line_pos+25>last_pos.y && last_pos.y>line_pos) {
                count++;
                countDU++;
            }

            if ( cur_pos.y<line_pos+50 && cur_pos.y>line_pos-50) {
                avg_vel += abs(cur_pos.y-last_pos.y);
                count_active++;
            }

            //update heatmapfg
            heat_mapfg = cv::Mat::zeros(FR_H, FR_W, CV_8UC3);
            count_arr[lmindex] = count;
            avg_vel_arr[lmindex] = avg_vel/count_active ;
            for (int i=0; i<landmarks.size(); i++) {
                cv::circle(heat_mapfg, cv::Point((landmarks[i].y + 50)*2.4, (landmarks[i].x + 50)*2.4), count_arr[i]*3, cv::Scalar(0, 16*avg_vel_arr[i], 255 - 16*avg_vel_arr[i]), -1);
            }
            cv::GaussianBlur(heat_mapfg, heat_mapfg, cv::Size(15, 15), 5);
        } else {
            if (last_poses.count(id)) {
                last_poses.erase(last_poses.find(id));
            }
        }
    }

    cv::line(image, cv::Point(0, line_pos), cv::Point(FR_W, line_pos), cv::Scalar(0,255,0),2);
    cv::putText(image, "COUNT: "+to_string(count), cv::Point(10, 15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::putText(image, "UP->DOWN: "+to_string(countUD), cv::Point(10, 30), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::putText(image, "DOWN->UP: "+to_string(countDU), cv::Point(10, 45), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::imshow("BLOBS", image);
    cv::imshow("HEATMAP", heat_map + heat_mapfg);
    cv::waitKey(33);
}
void MapperGradEuclid::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());
    CV_DbgAssert(img1.channels() == image2.channels());
    CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Matrices with reference frame coordinates
    Mat grid_r, grid_c;
    grid(img1, grid_r, grid_c);

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Calculate parameters using least squares
    Matx<double, 3, 3> A;
    Vec<double, 3> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.
    Mat xIy_yIx = grid_c.mul(grady);
    xIy_yIx -= grid_r.mul(gradx);

    A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
    A(0, 1) = sum(sum(gradx.mul(grady)))[0];
    A(0, 2) = sum(sum(gradx.mul(xIy_yIx)))[0];
    A(1, 1) = sum(sum(grady.mul(grady)))[0];
    A(1, 2) = sum(sum(grady.mul(xIy_yIx)))[0];
    A(2, 2) = sum(sum(xIy_yIx.mul(xIy_yIx)))[0];
    A(1, 0) = A(0, 1);
    A(2, 0) = A(0, 2);
    A(2, 1) = A(1, 2);

    b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(1) = -sum(sum(imgDiff.mul(grady)))[0];
    b(2) = -sum(sum(imgDiff.mul(xIy_yIx)))[0];

    // Calculate parameters. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 3> k = A.inv(DECOMP_CHOLESKY)*b;

    double cosT = cos(k(2));
    double sinT = sin(k(2));
    Matx<double, 2, 2> linTr(cosT, -sinT, sinT, cosT);
    Vec<double, 2> shift(k(0), k(1));

    if(res.empty()) {
        res = Ptr<Map>(new MapAffine(linTr, shift));
    } else {
        MapAffine newTr(linTr, shift);
        res->compose(newTr);
   }
}
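
// For context, a small usage sketch showing how a mapper like the one above is
// typically driven through MapperPyramid in the opencv_contrib "reg" module
// (OpenCV 3.x headers and class names assumed; img1/img2 must be floating
// point images of the same size and channel count).
#include <opencv2/core.hpp>
#include <opencv2/reg/mappergradeuclid.hpp>
#include <opencv2/reg/mapperpyramid.hpp>

cv::Ptr<cv::reg::Map> registerEuclid(const cv::Mat& img1, const cv::Mat& img2)
{
    cv::reg::MapperGradEuclid mapper;       // rotation + translation model
    cv::reg::MapperPyramid mapPyr(mapper);  // coarse-to-fine refinement
    cv::Ptr<cv::reg::Map> mapPtr;           // left empty: no initial guess
    mapPyr.calculate(img1, img2, mapPtr);
    return mapPtr;                          // holds a MapAffine for this mapper
}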
 int main(int argc, char* argv[]) {
     // welcome message
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
     std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
     std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
     std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
     std::cout<<"* The retina model still have the following properties:"<<std::endl;
     std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
     std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
     std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
     std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
     std::cout<<"* for more information, reer to the following papers :"<<std::endl;
     std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
     std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
     std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
     std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
     std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
     std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
     std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;

     // basic input arguments checking
     if (argc<2)
     {
         help("bad number of parameter");
         return -1;
     }

     bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
     int chosenMethod=0;
     if (!strcmp(argv[argc-1], "fast"))
     {
         chosenMethod=1;
         std::cout<<"Using fast method (no spectral whithning), adaptation of Meylan&al 2008 method"<<std::endl;
     }

     std::string inputImageName=argv[1];

     //////////////////////////////////////////////////////////////////////////////
     // checking input media type (still image, video file, live video acquisition)
     std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
     // image processing case
     // declare the retina input buffer... that will be fed differently in regard of the input media
     inputImage = cv::imread(inputImageName, -1); // load the image unchanged (keep the original bit depth, required for EXR input)
     std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
     if (!inputImage.total())
     {
         help("could not load image, program end");
         return -1;
     }
     // rescale between 0 and 1
     normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
     cv::Mat gammaTransformedImage;
     cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
     imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
     imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
     if (inputImage.empty())
     {
         help("Input image could not be loaded, aborting");
         return -1;
     }

     //////////////////////////////////////////////////////////////////////////////
     // Program start in a try/catch safety context (Retina may throw errors)
     try
     {
         /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
          * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
          */
         if (useLogSampling)
         {
             retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
         }
         else // -> else allocate "classical" retina :
             retina = cv::bioinspired::createRetina(inputImage.size());

         // create a fast retina tone mapper (Meylan et al. algorithm)
         std::cout<<"Allocating fast tone mapper..."<<std::endl;
         //cv::Ptr<cv::RetinaFastToneMapping> fastToneMapper=createRetinaFastToneMapping(inputImage.size());
         std::cout<<"Fast tone mapper allocated"<<std::endl;

         // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
         retina->write("RetinaDefaultParameters.xml");

         // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
         retina->activateMovingContoursProcessing(false);

         // declare retina output buffers
         cv::Mat retinaOutput_parvo;

         /////////////////////////////////////////////
         // prepare displays and interactions
         histogramClippingValue=0; // default value... updated with interface slider
         //inputRescaleMat = inputImage;
         //outputRescaleMat = imageInputRescaled;
         cv::namedWindow("Processing configuration",1);
         cv::createTrackbar("histogram edges clipping limit", "Processing configuration",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);

         colorSaturationFactor=3;
         cv::createTrackbar("Color saturation", "Processing configuration", &colorSaturationFactor,5,callback_saturateColors);

         retinaHcellsGain=40;
         cv::createTrackbar("Hcells gain", "Processing configuration",&retinaHcellsGain,100,callBack_updateRetinaParams);

         localAdaptation_photoreceptors=197;
         localAdaptation_Gcells=190;
         cv::createTrackbar("Ph sensitivity", "Processing configuration", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
         cv::createTrackbar("Gcells sensitivity", "Processing configuration", &localAdaptation_Gcells,199,callBack_updateRetinaParams);


         /////////////////////////////////////////////
         // apply default parameters of user interaction variables
         rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
         retina->setColorSaturation(true,(float)colorSaturationFactor);
         callBack_updateRetinaParams(1,NULL); // first call for default parameters setup

         // processing loop with stop condition
         bool continueProcessing=true;
         while(continueProcessing)
         {
             // run retina filter
             if (!chosenMethod)
             {
                 retina->run(imageInputRescaled);
                 // Retrieve and display retina output
                 retina->getParvo(retinaOutput_parvo);
                 cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
                 cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
                 cv::imwrite("HDRinput.jpg",imageInputRescaled/255.0);
                 cv::imwrite("RetinaToneMapping.jpg",retinaOutput_parvo);
             }
             else
             {
                 // apply the simplified hdr tone mapping method
                 cv::Mat fastToneMappingOutput;
                 retina->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput);
                 cv::imshow("Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput);
             }
             /*cv::Mat fastToneMappingOutput_specificObject;
             fastToneMapper->setup(3.f, 1.5f, 1.f);
             fastToneMapper->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput_specificObject);
             cv::imshow("### Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput_specificObject);
*/
             // allow the user to stop the loop with the Esc key
             if ((cv::waitKey(10) & 0xFF) == 27)
                 continueProcessing = false;
         }
     } catch (const cv::Exception& e)
     {
         std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
     }

     // Program end message
     std::cout<<"Retina demo end"<<std::endl;

     return 0;
 }
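
// rescaleGrayLevelMat(), used above to prepare the HDR input, is not shown in
// this snippet. A deliberately simplified sketch of the idea: normalize to
// [0,255], then saturate the extreme gray levels selected by the clipping
// slider before a final rescale. The shipped OpenCV sample derives the clip
// bounds from an actual histogram, so treat this only as an approximation.
static void rescaleGrayLevelMat(const cv::Mat& inputMat, cv::Mat& outputMat,
                                const float histogramClippingLimit)
{
    cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
    const double lowerBound = 255.0 * histogramClippingLimit;
    const double upperBound = 255.0 * (1.0 - histogramClippingLimit);
    outputMat = cv::max(outputMat, lowerBound);  // clip the dark extreme
    outputMat = cv::min(outputMat, upperBound);  // clip the bright extreme
    cv::normalize(outputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
}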
void CHumanTracker::detectAndTrackFace()
{
    static ros::Time probe;

	// Do ROI
	debugFrame = rawFrame.clone();
	Mat img =  this->rawFrame(searchROI);

	faces.clear();
	ostringstream txtstr;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray;
    Mat frame( cvRound(img.rows), cvRound(img.cols), CV_8UC1 );
    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, frame, frame.size(), 0, 0, INTER_LINEAR );
    //equalizeHist( frame, frame );

	// This is for internal usage
    const ros::Time _n = ros::Time::now();
	double dt = (_n - probe).toSec();
	probe = _n;


	CvMat _image = frame;

    if (!storage.empty())
    {
        cvClearMemStorage(storage);
    }
    CvSeq* _objects = cvHaarDetectObjects(&_image, cascade, storage,
			1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);

	vector<CvAvgComp> vecAvgComp;
	Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);

	// End of using C API

	isFaceInCurrentFrame = (vecAvgComp.size() > 0);

    // This is a hack
    bool isProfileFace = false;
    if ((profileHackEnabled) && (!isFaceInCurrentFrame) && (trackingState == STATE_REJECT))
    {
        ROS_DEBUG("Using Profile Face hack ...");

        if (!storageProfile.empty()) {
            cvClearMemStorage(storageProfile);
        }
        CvSeq* _objectsProfile = cvHaarDetectObjects(&_image, cascadeProfile, storageProfile,
                1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);
        vecAvgComp.clear();
        Seq<CvAvgComp>(_objectsProfile).copyTo(vecAvgComp);
        isFaceInCurrentFrame = (vecAvgComp.size() > 0);
        if (isFaceInCurrentFrame)
        {
            ROS_DEBUG("The hack seems to work!");
        }
        isProfileFace = true;
    }

	if (trackingState == STATE_LOST)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
			trackingState = STATE_DETECT;
		}
	}

	if (trackingState == STATE_DETECT)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_LOST;
		}

		if (stateCounter > minDetectFrames)
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

	}

	if (trackingState == STATE_TRACK)
	{
		if (!isFaceInCurrentFrame)
		{
			trackingState = STATE_REJECT;
		}
	}

	if (trackingState == STATE_REJECT)
	{
		float covNorm = sqrt(
                        pow(KFTracker.errorCovPost.at<float>(0,0), 2) +
                        pow(KFTracker.errorCovPost.at<float>(1,1), 2)
						);

		if (!isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

		if ((stateCounter > minRejectFrames) && (covNorm > maxRejectCov))
		{
			trackingState = STATE_LOST;
			stateCounter = 0;
			resetKalmanFilter();
            reset();
		}
	}

	if ((trackingState == STATE_TRACK) || (trackingState == STATE_REJECT))
	{
		bool updateFaceHist = false;
		// This is important:

        KFTracker.transitionMatrix.at<float>(0,2) = dt;
        KFTracker.transitionMatrix.at<float>(1,3) = dt;

        Mat pred = KFTracker.predict();

		if (isFaceInCurrentFrame)
		{
            //std::cout << vecAvgComp.size() << " detections in image " << std::endl;
			float minCovNorm = 1e24;
			int i = 0;
			for( vector<CvAvgComp>::const_iterator rr = vecAvgComp.begin(); rr != vecAvgComp.end(); rr++, i++ )
			{
				copyKalman(KFTracker, MLSearch);
				CvRect r = rr->rect;
				r.x += searchROI.x;
				r.y += searchROI.y;
				double nr = rr->neighbors;
				Point center;
				Scalar color = colors[i%8];

				float normFaceScore = 1.0 - (nr / 40.0);
				if (normFaceScore > 1.0) normFaceScore = 1.0;
				if (normFaceScore < 0.0) normFaceScore = 0.0;
                setIdentity(MLSearch.measurementNoiseCov, Scalar_<float>::all(normFaceScore));

				center.x = cvRound(r.x + r.width*0.5);
				center.y = cvRound(r.y + r.height*0.5);

				measurement.at<float>(0) = r.x;
				measurement.at<float>(1) = r.y;
				measurement.at<float>(2) = r.width;
				measurement.at<float>(3) = r.height;

                MLSearch.correct(measurement);

				float covNorm = sqrt(
                    pow(MLSearch.errorCovPost.at<float>(0,0), 2) +
                    pow(MLSearch.errorCovPost.at<float>(1,1), 2)
				);

				if (covNorm < minCovNorm)
				{
					minCovNorm = covNorm;
					MLFace = *rr;
				}

//                if ((debugLevel & 0x02) == 0x02)
//                {
                    rectangle(debugFrame, center - Point(r.width*0.5, r.height*0.5), center + Point(r.width*0.5, r.height * 0.5), color);

                    txtstr.str("");
                    txtstr << "   Sc:" << rr->neighbors << " S:" << r.width << "x" << r.height;

                    putText(debugFrame, txtstr.str(), center, FONT_HERSHEY_PLAIN, 1, color);
//                }
			}

			// TODO: clean this up
            Rect r(MLFace.rect);
			r.x += searchROI.x;
			r.y += searchROI.y;
			faces.push_back(r);
			double nr = MLFace.neighbors;
			faceScore = nr;
            if (isProfileFace) faceScore = 0.0;
			float normFaceScore = 1.0 - (nr / 40.0);
			if (normFaceScore > 1.0) normFaceScore = 1.0;
			if (normFaceScore < 0.0) normFaceScore = 0.0;
            setIdentity(KFTracker.measurementNoiseCov, Scalar_<float>::all(normFaceScore));
			measurement.at<float>(0) = r.x;
			measurement.at<float>(1) = r.y;
			measurement.at<float>(2) = r.width;
			measurement.at<float>(3) = r.height;
            KFTracker.correct(measurement);

            // We see a face
			updateFaceHist = true;
		}
		else
		{
            KFTracker.statePost = KFTracker.statePre;
            KFTracker.errorCovPost = KFTracker.errorCovPre;
		}

        // TODO: MOVE THIS
        for (unsigned int k = 0; k < faces.size(); k++) {
            rectangle(debugFrame, faces.at(k), CV_RGB(128, 128, 128));
        }

        beleif.x = max<int>(KFTracker.statePost.at<float>(0), 0);
        beleif.y = max<int>(KFTracker.statePost.at<float>(1), 0);
        beleif.width = min<int>(KFTracker.statePost.at<float>(4), iWidth - beleif.x);
        beleif.height = min<int>(KFTracker.statePost.at<float>(5), iHeight - beleif.y);

		Point belCenter;
		belCenter.x = beleif.x + (beleif.width * 0.5);
		belCenter.y = beleif.y + (beleif.height * 0.5);

        if ((debugLevel & 0x02) == 0x02)
        {
            txtstr.str("");
//            txtstr << "P:" << std::setprecision(3) << faceUncPos << " S:" << beleif.width << "x" << beleif.height;
//            putText(debugFrame, txtstr.str(), belCenter + Point(0, 50), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,0,0));

//            circle(debugFrame, belCenter, belRad, CV_RGB(255,0,0));
//            circle(debugFrame, belCenter, (belRad - faceUncPos < 0) ? 0 : (belRad - faceUncPos), CV_RGB(255,255,0));
//            circle(debugFrame, belCenter, belRad + faceUncPos, CV_RGB(255,0,255));
        }

        //searchROI.x = max<int>(belCenter.x - KFTracker.statePost.at<float>(4) * 2, 0);
        //searchROI.y = max<int>(belCenter.y - KFTracker.statePost.at<float>(5) * 2, 0);
        //int x2 = min<int>(belCenter.x + KFTracker.statePost.at<float>(4) * 2, iWidth);
        //int y2 = min<int>(belCenter.y + KFTracker.statePost.at<float>(4) * 2, iHeight);
		//searchROI.width = x2 - searchROI.x;
		//searchROI.height = y2 - searchROI.y;


		if ((updateFaceHist) && (skinEnabled))
		{
            //updateFaceHist is true when we see a real face (not all the times)
            Rect samplingWindow;
//            samplingWindow.x = beleif.x + (0.25 * beleif.width);
//            samplingWindow.y = beleif.y + (0.1 * beleif.height);
//            samplingWindow.width = beleif.width * 0.5;
//            samplingWindow.height = beleif.height * 0.9;
            samplingWindow.x = measurement.at<float>(0) + (0.25 * measurement.at<float>(2));
            samplingWindow.y = measurement.at<float>(1) + (0.10 * measurement.at<float>(3));
            samplingWindow.width = measurement.at<float>(2) * 0.5;
            samplingWindow.height = measurement.at<float>(3) * 0.9;
            if ((debugLevel & 0x04) == 0x04)
            {
                rectangle(debugFrame, samplingWindow, CV_RGB(255,0,0));
            }
			Mat _face = rawFrame(samplingWindow);
			generateRegionHistogram(_face, faceHist);
		}


	}


//    if ((debugLevel & 0x02) == 0x02)
//    {
//        rectangle(debugFrame, searchROI, CV_RGB(0,0,0));
//        txtstr.str("");
//        txtstr << strStates[trackingState] << "(" << std::setprecision(3) << (dt * 1e3) << "ms )";
//        putText(debugFrame, txtstr.str() , Point(30,300), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,255,255));
//    }

//	dt =  ((double) getTickCount() - t) / ((double) getTickFrequency()); // In Seconds
}
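
// copyKalman(), used above to seed the per-detection search filter (MLSearch)
// from the main tracker, is not part of this snippet. A minimal sketch that
// clones the usual cv::KalmanFilter state; the real implementation may copy
// additional fields.
static void copyKalman(const cv::KalmanFilter& src, cv::KalmanFilter& dst)
{
    dst.transitionMatrix    = src.transitionMatrix.clone();
    dst.measurementMatrix   = src.measurementMatrix.clone();
    dst.processNoiseCov     = src.processNoiseCov.clone();
    dst.measurementNoiseCov = src.measurementNoiseCov.clone();
    dst.errorCovPre         = src.errorCovPre.clone();
    dst.errorCovPost        = src.errorCovPost.clone();
    dst.statePre            = src.statePre.clone();
    dst.statePost           = src.statePost.clone();
}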
bool findCirclesGridAB( cv::InputArray _image, cv::Size patternSize,
		cv::OutputArray _centers, int flags, const cv::Ptr<cv::FeatureDetector> &blobDetector )
{
    bool isAsymmetricGrid = (flags & cv::CALIB_CB_ASYMMETRIC_GRID) != 0;
    bool isSymmetricGrid  = (flags & cv::CALIB_CB_SYMMETRIC_GRID) != 0;
    CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);

    cv::Mat image = _image.getMat();
    std::vector<cv::Point2f> centers;

    std::vector<cv::KeyPoint> keypoints;
    blobDetector->detect(image, keypoints);
    std::vector<cv::Point2f> points;
    for (size_t i = 0; i < keypoints.size(); i++)
    {
      points.push_back (keypoints[i].pt);
    }

    if(flags & cv::CALIB_CB_CLUSTERING)
    {
      CirclesGridClusterFinder circlesGridClusterFinder(isAsymmetricGrid);
      circlesGridClusterFinder.findGrid(points, patternSize, centers);
      cv::Mat(centers).copyTo(_centers);
      return !centers.empty();
    }

    CirclesGridFinderParameters parameters;
    parameters.vertexPenalty = -0.6f;
    parameters.vertexGain = 1;
    parameters.existingVertexGain = 10000;
    parameters.edgeGain = 1;
    parameters.edgePenalty = -0.6f;

    if(flags & cv::CALIB_CB_ASYMMETRIC_GRID)
      parameters.gridType = CirclesGridFinderParameters::ASYMMETRIC_GRID;
    if(flags & cv::CALIB_CB_SYMMETRIC_GRID)
      parameters.gridType = CirclesGridFinderParameters::SYMMETRIC_GRID;

    const int attempts = 2;
    const size_t minHomographyPoints = 4;
    cv::Mat H;
    for (int i = 0; i < attempts; i++)
    {
      centers.clear();
      CirclesGridFinder boxFinder(patternSize, points, parameters);
      bool isFound = false;
//#define BE_QUIET 1
#if BE_QUIET
      void* oldCbkData;
      //cv::ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
      try
      {
        isFound = boxFinder.findHoles();
      }
      catch (const cv::Exception&)
      {
        // ignore failures from findHoles() and fall through to the next attempt
      }
#if BE_QUIET
      redirectError(oldCbk, oldCbkData);
#endif
      if (isFound)
      {
        switch(parameters.gridType)
        {
          case CirclesGridFinderParameters::SYMMETRIC_GRID:
            boxFinder.getHoles(centers);
            break;
          case CirclesGridFinderParameters::ASYMMETRIC_GRID:
            boxFinder.getAsymmetricHoles(centers);
            break;
          default:
            CV_Error(CV_StsBadArg, "Unknown pattern type");
        }

        if (i != 0)
        {
        	cv::Mat orgPointsMat;
        	cv::transform(centers, orgPointsMat, H.inv());
        	cv::convertPointsFromHomogeneous(orgPointsMat, centers);
        }
        cv::Mat(centers).copyTo(_centers);
        return true;
      }

      boxFinder.getHoles(centers);
      if (i != attempts - 1)
      {
        if (centers.size() < minHomographyPoints)
          break;
        H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
      }
    }
    cv::Mat(centers).copyTo(_centers);
    return false;
}
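
// Illustrative call for the function above: look for a 4x11 asymmetric circle
// grid using an OpenCV 2.4-style blob detector (OpenCV 3+ would use
// cv::SimpleBlobDetector::create() instead). Grid size and detector parameters
// are assumptions and normally need tuning for the actual target.
bool detectAsymmetricPattern(const cv::Mat& gray, std::vector<cv::Point2f>& centers)
{
    cv::Ptr<cv::FeatureDetector> blobDetector(new cv::SimpleBlobDetector());
    return findCirclesGridAB(gray, cv::Size(4, 11), centers,
                             cv::CALIB_CB_ASYMMETRIC_GRID, blobDetector);
}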
void MapperGradProj::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());
    CV_DbgAssert(img1.channels() == image2.channels());
    CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Matrices with reference frame coordinates
    Mat grid_r, grid_c;
    grid(img1, grid_r, grid_c);

    // Calculate parameters using least squares
    Matx<double, 8, 8> A;
    Vec<double, 8> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.
    Mat xIx = grid_c.mul(gradx);
    Mat xIy = grid_c.mul(grady);
    Mat yIx = grid_r.mul(gradx);
    Mat yIy = grid_r.mul(grady);
    Mat Ix2 = gradx.mul(gradx);
    Mat Iy2 = grady.mul(grady);
    Mat xy = grid_c.mul(grid_r);
    Mat IxIy = gradx.mul(grady);
    Mat x2 = grid_c.mul(grid_c);
    Mat y2 = grid_r.mul(grid_r);
    Mat G = xIx + yIy;
    Mat G2 = sqr(G);
    Mat IxG = gradx.mul(G);
    Mat IyG = grady.mul(G);

    A(0, 0) = sum(sum(x2.mul(Ix2)))[0];
    A(1, 0) = sum(sum(xy.mul(Ix2)))[0];
    A(2, 0) = sum(sum(grid_c.mul(Ix2)))[0];
    A(3, 0) = sum(sum(x2.mul(IxIy)))[0];
    A(4, 0) = sum(sum(xy.mul(IxIy)))[0];
    A(5, 0) = sum(sum(grid_c.mul(IxIy)))[0];
    A(6, 0) = -sum(sum(x2.mul(IxG)))[0];
    A(7, 0) = -sum(sum(xy.mul(IxG)))[0];

    A(1, 1) = sum(sum(y2.mul(Ix2)))[0];
    A(2, 1) = sum(sum(grid_r.mul(Ix2)))[0];
    A(3, 1) = A(4, 0);
    A(4, 1) = sum(sum(y2.mul(IxIy)))[0];
    A(5, 1) = sum(sum(grid_r.mul(IxIy)))[0];
    A(6, 1) = A(7, 0);
    A(7, 1) = -sum(sum(y2.mul(IxG)))[0];

    A(2, 2) = sum(sum(Ix2))[0];
    A(3, 2) = A(5, 0);
    A(4, 2) = A(5, 1);
    A(5, 2) = sum(sum(IxIy))[0];
    A(6, 2) = -sum(sum(grid_c.mul(IxG)))[0];
    A(7, 2) = -sum(sum(grid_r.mul(IxG)))[0];

    A(3, 3) = sum(sum(x2.mul(Iy2)))[0];
    A(4, 3) = sum(sum(xy.mul(Iy2)))[0];
    A(5, 3) = sum(sum(grid_c.mul(Iy2)))[0];
    A(6, 3) = -sum(sum(x2.mul(IyG)))[0];
    A(7, 3) = -sum(sum(xy.mul(IyG)))[0];

    A(4, 4) = sum(sum(y2.mul(Iy2)))[0];
    A(5, 4) = sum(sum(grid_r.mul(Iy2)))[0];
    A(6, 4) = A(7, 3);
    A(7, 4) = -sum(sum(y2.mul(IyG)))[0];

    A(5, 5) = sum(sum(Iy2))[0];
    A(6, 5) = -sum(sum(grid_c.mul(IyG)))[0];
    A(7, 5) = -sum(sum(grid_r.mul(IyG)))[0];

    A(6, 6) = sum(sum(x2.mul(G2)))[0];
    A(7, 6) = sum(sum(xy.mul(G2)))[0];

    A(7, 7) = sum(sum(y2.mul(G2)))[0];

    // Upper half values (A is symmetric)
    A(0, 1) = A(1, 0);
    A(0, 2) = A(2, 0);
    A(0, 3) = A(3, 0);
    A(0, 4) = A(4, 0);
    A(0, 5) = A(5, 0);
    A(0, 6) = A(6, 0);
    A(0, 7) = A(7, 0);
    
    A(1, 2) = A(2, 1);
    A(1, 3) = A(3, 1);
    A(1, 4) = A(4, 1);
    A(1, 5) = A(5, 1);
    A(1, 6) = A(6, 1);
    A(1, 7) = A(7, 1);
    
    A(2, 3) = A(3, 2);
    A(2, 4) = A(4, 2);
    A(2, 5) = A(5, 2);
    A(2, 6) = A(6, 2);
    A(2, 7) = A(7, 2);
    
    A(3, 4) = A(4, 3);
    A(3, 5) = A(5, 3);
    A(3, 6) = A(6, 3);
    A(3, 7) = A(7, 3);
    
    A(4, 5) = A(5, 4);
    A(4, 6) = A(6, 4);
    A(4, 7) = A(7, 4);
    
    A(5, 6) = A(6, 5);
    A(5, 7) = A(7, 5);
    
    A(6, 7) = A(7, 6);

    // Calculation of b
    b(0) = -sum(sum(imgDiff.mul(xIx)))[0];
    b(1) = -sum(sum(imgDiff.mul(yIx)))[0];
    b(2) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(3) = -sum(sum(imgDiff.mul(xIy)))[0];
    b(4) = -sum(sum(imgDiff.mul(yIy)))[0];
    b(5) = -sum(sum(imgDiff.mul(grady)))[0];
    b(6) = sum(sum(imgDiff.mul(grid_c.mul(G))))[0];
    b(7) = sum(sum(imgDiff.mul(grid_r.mul(G))))[0];

    // Calculate the projective transformation. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 8> k = A.inv(DECOMP_CHOLESKY)*b;

    Matx<double, 3, 3> H(k(0) + 1., k(1), k(2), k(3), k(4) + 1., k(5), k(6), k(7), 1.);
    if(res.empty()) {
        res = Ptr<Map>(new MapProjec(H));
    } else {
        MapProjec newTr(H);
        res->compose(newTr);
    }
}
void stereoSelectorCallback(const sensor_msgs::Image::ConstPtr& image_ptr)
{
    if(!capture_image)
        return;

    cv_bridge::CvImagePtr cv_ptr;

    try
    {
      cv_ptr = cv_bridge::toCvCopy(image_ptr, sensor_msgs::image_encodings::RGB8);
    }
    catch (cv_bridge::Exception& e)
    {
		ROS_ERROR("cv_bridge exception: %s", e.what());
		return;
    }

    cv::cvtColor(cv_ptr->image, input_image, CV_BGR2RGB);

    string object_name;

    //input_image = crop_hand(input_image);

    float certainty = orbit->recognizeObject(input_image, object_name, Orbit::BAG_OF_WORDS_SVM);


	/*
	*	Clean stabilizer if gesture has not been seen in a while
	*/

	ros::Time now = ros::Time::now();
	ros::Duration diff_last_hand_received = now - last_hand_received;
	
	last_hand_received = now;

	if(diff_last_hand_received.toSec()>1)
	{
		for(unsigned int i = 0; i<stabilizer.size(); i++)
		{
			stabilizer[i] = 0;
		}
	}

    /*
     * Update stabilizer when the gesture is not recognized
     */
	if(certainty<(float)certainty_threshold)
	{
		for(unsigned int i = 0; i<stabilizer.size()-1; i++)
		{

			if(stabilizer[i]>0)
				stabilizer[i]--;
		}

		if(stabilizer[stabilizer.size()-1] < max_stabilizer)
			stabilizer[stabilizer.size()-1]++;

		return;
	}
	else
	{
		if(stabilizer[stabilizer.size()-1] >= 2)
			stabilizer[stabilizer.size()-1]-=2;
		else if(stabilizer[stabilizer.size()-1] == 1)
			stabilizer[stabilizer.size()-1]--;
	}

	/*
	 * Update stabilizer when gesture is known
	 */
	for(unsigned int i = 0; i<stabilizer.size()-1; i++)
	{
		if(object_name == hands[i])
		{
			if(stabilizer[i] < max_stabilizer)
				stabilizer[i]++;
		}
		else
		{
			if(stabilizer[i]>0)
				stabilizer[i]--;
		}
	}


	/*
	 * Print Stabilizer values
	 */
	for(unsigned int i = 0; i<stabilizer.size(); i++)
	{
		if(i<stabilizer.size()-1)
			printf("%s: %d, ",hands[i].c_str(), stabilizer[i]);
		else
			printf("Not known: %d\n", stabilizer[i]);
	}



	/*
	 * Find maximum element of the stabilizer
	 */
	int max_stab = stabilizer[0];
	unsigned int max_index = 0;

	for(unsigned int i = 0; i<stabilizer.size(); i++)
	{
		if(stabilizer[i]>max_stab)
		{
			max_stab = stabilizer[i];
			max_index = i;
		}
	}


    ros::Duration dur = now-last_pub;

	if(max_stab >= stabilizer_threshold && dur.toSec()>2 && max_index < stabilizer.size()-1)
	{
		std_msgs::String msg_to_send;

		msg_to_send.data = hands[max_index];

		hand_pub.publish(msg_to_send);

		last_pub = now;

		for(unsigned int i = 0; i<stabilizer.size(); i++)
		{
			stabilizer[i] = 0;
		}
	}
}
Example #11
  cv::Point2f calcLocation(cv::Mat query_img) {

    std::vector<cv::KeyPoint> kp_query; // Keypoints of the query image
    cv::Mat des_query;
    cv::Mat query_img_gray;
    
    cv::cvtColor(query_img,
                 query_img_gray,
                 cv::COLOR_BGR2GRAY);


    detector->detectAndCompute(query_img_gray, 
                               cv::noArray(), 
                               kp_query,
                               des_query);


    std::vector< std::vector<cv::DMatch> > matches;
   
    // knnMatch(query, train, ...): queryIdx must index the query image's
    // keypoints and trainIdx the reference image's keypoints.
    matcher.knnMatch(des_query,
                     des_ref,
                     matches,
                     2);


    std::vector<cv::KeyPoint> matched_query, matched_ref, inliers_query, inliers_ref;
    std::vector<cv::DMatch> good_matches;

   //-- Localize the object
    std::vector<cv::Point2f> pts_query;
    std::vector<cv::Point2f> pts_ref;
    
  for(size_t i = 0; i < matches.size(); i++) {
    
    cv::DMatch first = matches[i][0];
    float dist_best = matches[i][0].distance;
    float dist_second = matches[i][1].distance;

    // Lowe's ratio test: keep the match only if the best candidate is clearly
    // better than the second best.
    if (dist_best < match_ratio * dist_second) {
      
      matched_query.push_back(kp_query[first.queryIdx]);
      matched_ref.push_back(kp_ref[first.trainIdx]);
      
      pts_query.push_back(kp_query[first.queryIdx].pt);
      pts_ref.push_back(kp_ref[first.trainIdx].pt);
      
    }
  }


  cv::Mat mask; 

  // Homography (estimated with RANSAC, 5 px reprojection threshold)
  cv::Mat homography;
  
  homography = cv::findHomography(pts_query, 
				  pts_ref,
				  cv::RANSAC,
				  5,
				  mask);


  // Map the centre of the query image into the reference image plane.
  std::vector<cv::Point2f> centers(1), centers_transformed(1);

  // cv::Point2f takes (x, y), i.e. (column, row).
  centers[0] = cv::Point2f(query_img_gray.cols / 2.0f,
                           query_img_gray.rows / 2.0f);

  cv::perspectiveTransform(centers,
                           centers_transformed,
                           homography);

  return centers_transformed[0];
  
  }
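
// A sketch of the state calcLocation() assumes (member names taken from the
// method above; the wrapper class name and the concrete ORB + Hamming setup
// are illustrative): the reference image is processed once and its keypoints
// and descriptors are reused for every query.
class Localizer  // hypothetical host class
{
public:
  Localizer(const cv::Mat& ref_img_bgr, float ratio = 0.8f)
      : detector(cv::ORB::create()),
        matcher(cv::NORM_HAMMING),
        match_ratio(ratio)
  {
    cv::Mat ref_gray;
    cv::cvtColor(ref_img_bgr, ref_gray, cv::COLOR_BGR2GRAY);
    detector->detectAndCompute(ref_gray, cv::noArray(), kp_ref, des_ref);
  }

  cv::Point2f calcLocation(cv::Mat query_img);  // body as shown above

private:
  cv::Ptr<cv::Feature2D> detector;
  cv::BFMatcher matcher;
  std::vector<cv::KeyPoint> kp_ref;
  cv::Mat des_ref;
  float match_ratio;
};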
Example #12
void AllignedFrameSource::reset()
{
    base_->reset();
}
Example #13
void DegradeFrameSource::reset()
{
    base_->reset();
}
 void simpleMatching(
         const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
         std::vector<cv::DMatch>& matches)
 {
     matcher_->match(descriptors_0, descriptors_1, matches);
 }
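
// simpleMatching() delegates to a matcher_ member that is not shown here. A
// plausible initialization, assuming binary descriptors (ORB/BRIEF) and hence
// Hamming distance; other descriptor types would use a different norm or a
// FLANN-based matcher.
cv::Ptr<cv::DescriptorMatcher> matcher_ =
        cv::DescriptorMatcher::create("BruteForce-Hamming");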
Example #15
  /*
   * Initializes annotator
   */
  TyErrorId initialize(AnnotatorContext &ctx)
  {
    outInfo("initialize");

    if(ctx.isParameterDefined("keypointDetector"))
    {
      ctx.extractValue("keypointDetector", keypointDetector);
    }
    else
    {
      outError("no keypoint detector provided!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

    if(ctx.isParameterDefined("featureExtractor"))
    {
      ctx.extractValue("featureExtractor", featureExtractor);
    }
    else
    {
      outError("no feature extractor provided!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

    outDebug("creating " << keypointDetector << " key points detector...");
    detector = cv::FeatureDetector::create(keypointDetector);
    if(detector.empty())
    {
      outError("creation failed!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

#if OUT_LEVEL == OUT_LEVEL_DEBUG
    printParams(detector);
#endif
    setupAlgorithm(detector);

    outDebug("creating " << featureExtractor << " feature extractor...");
    extractor = cv::DescriptorExtractor::create(featureExtractor);
    if(extractor.empty())
    {
      outError("creation failed!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

#if OUT_LEVEL == OUT_LEVEL_DEBUG
    printParams(extractor);
#endif
    setupAlgorithm(extractor);

    if(featureExtractor == "SIFT" || featureExtractor == "SURF")
    {
      featureType = "numerical";
    }
    else
    {
      featureType = "binary";
    }

    return UIMA_ERR_NONE;
  }
/*****************************************************************************
 * MAIN
 *****************************************************************************/
int main(int argc, const char * argv[])
{
    
    
    
    //*************************************************************************
    // 1. Read the input files
    // This code reads the arguments from the input variable argv which is supposed to contain the
    // path of the input and reference database.
    std::string teachdb_folder, querydb_folder;
    if (argc > 2)
    {
        std::string command = argv[2];
        std::string type   = argv[1];
        if(type.compare("-SIFT")== 0)
        {
            _ftype = SIFT;
            std::cout << "NFT with SIFT feature detector and extractor." << std::endl;
        }
        else if(type.compare("-SURF")== 0)
        {
            _ftype = SURF;
            std::cout << "NFT with SURF feature detector and extractor." << std::endl;
        }
        else if(type.compare("-ORB")== 0)
        {
            _ftype = ORB;
            std::cout << "NFT with ORB feature detector and extractor." << std::endl;
        }
        
        
        if(command.compare("-file") == 0)
        {
            if(argc > 4)
            {
                teachdb_folder = argv[3];
                querydb_folder = argv[4];
                run_video = false;
            }
            else
            {
                std::cout << "No folder with query or reference images has been specified" << std::endl;
                std::cout << "Call: ./HCI571X_Feature_Matching -file folder_reference folder_query" << std::endl;
                system("pause");
                exit(0);
            }
            
        }
        else if(command.compare("-video") == 0)
        {
            run_video = true;
            if(argc > 4)
            {
                teachdb_folder = argv[3];
                device_id = atoi(argv[4]);
            }
        }
    }
    else
    {
        std::cout << "No command has been specified. use -file or -video" << std::endl;
        system("pause");
        exit(0);
    }
    
    
    
    // Read the filenames inside the teach database directory.
    std::vector<std::string> ref_filename;
    readDirFiles(teachdb_folder, &ref_filename);
    
    
    // Read the filenames inside the query database directory.
    std::vector<std::string> query_filename;
    readDirFiles(querydb_folder, &query_filename);
    
    
    //*************************************************************************
    // 2. Create a detector and a descriptor extractor
    // In this case, the SIFT detector and extractor are used
    
    // Corner detector
    if(_ftype == SIFT)_detector = new cv::SiftFeatureDetector(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if(_ftype == SURF)_detector = new cv::SurfFeatureDetector( _hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright );
    else if(_ftype == ORB)_detector = new cv::OrbFeatureDetector(1000);
    
    
    // Corner extractor
    if(_ftype == SIFT) _extractor = new cv::SiftDescriptorExtractor(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if(_ftype == SURF) _extractor = new cv::SurfDescriptorExtractor( _hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright );
    else if(_ftype == ORB)_extractor = new cv::OrbDescriptorExtractor(1000);
    

	// Check whether files are in the database list. 
	if(ref_filename.size() == 0)
	{
		std::cout << "STOP: no files in the reference database!!! Specify a folder or a set of files." << std::cout;
		system("pause");
		return -1;
	}

    //*************************************************************************
    // 3. Init the database
    // The code reads all the images in ref_filename, detect keypoints, extract descriptors and
    // stores them in the database variables.
    init_database(std::string(teachdb_folder), ref_filename);
    
    
    //*************************************************************************
    // 4. The data of the database _descriptorsRefDB is added to the feature matcher
    // and the matcher is trained
    _matcher.add(_descriptorsRefDB);
    _matcher.train();
    
    // Read the number of reference images in the database
    _num_ref_images = _matcher.getTrainDescriptors().size();
    
    
    //*************************************************************************
    // 5. Here we run the matching.
    // for images from files
    if(!run_video)
    {
        if(_mtype == KNN)run_matching( querydb_folder, query_filename);
        else if(_mtype == BRUTEFORCE) run_bf_matching(querydb_folder, query_filename);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
        
    }
    else
        // and image from a video camera
    {
        if(_mtype == KNN)run_matching( device_id);
        else if(_mtype == BRUTEFORCE)  run_bf_matching(device_id);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
        
        
    }
    
    //*************************************************************************
    // 6. Cleanup: release the keypoint detector and feature descriptor extractor
    _extractor.release();
    _detector.release();
    
    
    return 0;
}
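
/*****************************************************************************
 readDirFiles() is called in main() above but its definition is not part of this
 listing. The following is only a minimal sketch, assuming a POSIX dirent-based
 directory scan; the original helper may use Boost.Filesystem or the Windows API
 instead.
 */
#include <dirent.h>

void readDirFiles(std::string directory_path, std::vector<std::string>* filenames)
{
    DIR* dir = opendir(directory_path.c_str());
    if (dir == NULL) return;

    struct dirent* entry;
    while ((entry = readdir(dir)) != NULL)
    {
        std::string name(entry->d_name);

        // skip the current/parent directory entries; every other entry is
        // reported as a candidate image file
        if (name == "." || name == "..") continue;

        filenames->push_back(name);
    }
    closedir(dir);
}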
    std::vector<bbox_t> tracking_flow(cv::Mat dst_mat, bool check_error = true)
    {
        if (sync_PyrLKOpticalFlow_gpu.empty()) {
            std::cout << "sync_PyrLKOpticalFlow_gpu isn't initialized \n";
            return cur_bbox_vec;
        }

        int const old_gpu_id = cv::cuda::getDevice();
        if(old_gpu_id != gpu_id)
            cv::cuda::setDevice(gpu_id);

        if (dst_mat_gpu.cols == 0) {
            dst_mat_gpu = cv::cuda::GpuMat(dst_mat.size(), dst_mat.type());
            dst_grey_gpu = cv::cuda::GpuMat(dst_mat.size(), CV_8UC1);
        }

        //dst_grey_gpu.upload(dst_mat, stream);    // use BGR
        dst_mat_gpu.upload(dst_mat, stream);
        cv::cuda::cvtColor(dst_mat_gpu, dst_grey_gpu, CV_BGR2GRAY, 1, stream);

        if (src_grey_gpu.rows != dst_grey_gpu.rows || src_grey_gpu.cols != dst_grey_gpu.cols) {
            stream.waitForCompletion();
            src_grey_gpu = dst_grey_gpu.clone();
            cv::cuda::setDevice(old_gpu_id);
            return cur_bbox_vec;
        }

        ////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);    // OpenCV 2.4.x
        sync_PyrLKOpticalFlow_gpu->calc(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, err_gpu, stream);    // OpenCV 3.x

        cv::Mat cur_pts_flow_cpu;
        cur_pts_flow_gpu.download(cur_pts_flow_cpu, stream);

        dst_grey_gpu.copyTo(src_grey_gpu, stream);

        cv::Mat err_cpu, status_cpu;
        err_gpu.download(err_cpu, stream);
        status_gpu.download(status_cpu, stream);

        stream.waitForCompletion();

        std::vector<bbox_t> result_bbox_vec;

        if (err_cpu.cols == cur_bbox_vec.size() && status_cpu.cols == cur_bbox_vec.size())
        {
            for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
            {
                cv::Point2f cur_key_pt = cur_pts_flow_cpu.at<cv::Point2f>(0, i);
                cv::Point2f prev_key_pt = prev_pts_flow_cpu.at<cv::Point2f>(0, i);

                float moved_x = cur_key_pt.x - prev_key_pt.x;
                float moved_y = cur_key_pt.y - prev_key_pt.y;

                if (abs(moved_x) < 100 && abs(moved_y) < 100 && good_bbox_vec_flags[i])
                    if (err_cpu.at<float>(0, i) < flow_error && status_cpu.at<unsigned char>(0, i) != 0 &&
                        ((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
                    {
                        cur_bbox_vec[i].x += moved_x + 0.5;
                        cur_bbox_vec[i].y += moved_y + 0.5;
                        result_bbox_vec.push_back(cur_bbox_vec[i]);
                    }
                    else good_bbox_vec_flags[i] = false;
                else good_bbox_vec_flags[i] = false;

                //if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
            }
        }

        cur_pts_flow_gpu.swap(prev_pts_flow_gpu);
        cur_pts_flow_cpu.copyTo(prev_pts_flow_cpu);

        if (old_gpu_id != gpu_id)
            cv::cuda::setDevice(old_gpu_id);

        return result_bbox_vec;
    }
/*****************************************************************************
 Inits the database by loading all images from a given directory, extracting the feature
 keypoints and descriptors, and saving them in the database variables
 _keypointsRefDB and _descriptorsRefDB.
 @param directory_path: the path of the directory that contains the reference images, e.g., C:/my_database
 @param files: a std::vector object with a list of filenames.
 */
void init_database(std::string directory_path, std::vector<std::string> files)
{
    
    // This code reads the image names from the files list, loads each image,
    // detects keypoints, extracts the descriptors and pushes everything into
    // two database objects, indicated by the ending DB.
    for (int i = 0; i<files.size(); i++)
    {
        FeatureMap fm;
        
        // Fetch the ref. image name from the input array
        fm._ref_image_str =  directory_path;
        fm._ref_image_str.append("/");
        fm._ref_image_str.append( files[i]);
        
        bool ret = exists_test(fm._ref_image_str);
        if (ret == false) continue;
        
        
        // Load the image
        fm._ref_image = cv::imread(fm._ref_image_str);
        cvtColor( fm._ref_image ,  fm._ref_image , CV_BGR2GRAY);
        
        // Detect the keypoints
        _detector->detect(fm._ref_image,fm._keypointsRef);
        
        std::cout << "For referemce image " << fm._ref_image_str << " - found " << fm._keypointsRef.size() << " keypoints" << std::endl;
        
        // If keypoints found
        if(fm._keypointsRef.size() > 0)
        {
            // extract descriptors
            _extractor->compute( fm._ref_image, fm._keypointsRef, fm._descriptorRef);
            
            // push descriptors and keypoints into database
            _keypointsRefDB.push_back(fm._keypointsRef);
            _descriptorsRefDB.push_back(fm._descriptorRef);
            
            // count the total number of feature descriptors and keypoints in the database.
            _num_ref_features_in_db += fm._keypointsRef.size();
        }
        
        // Draw the keypoints
        /*
         cv::Mat out;
         cv::drawKeypoints(fm._ref_image , fm._keypointsRef, out);
         cv::imshow("out", out );
         cv::waitKey();
         
         out.release();
         */
        
        feature_map_database.push_back(fm);
        
    }
    
    // Converting the total number of features to a string.
    std::stringstream conv;
    conv << _num_ref_features_in_db;
    conv >> _num_ref_features_in_db_str;
    
}
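
/*****************************************************************************
 exists_test() is used by init_database() to skip files that cannot be opened,
 but its definition is not shown in this listing. A minimal sketch, assuming a
 plain std::ifstream-based check; the original may rely on stat() or Boost
 instead.
 */
#include <fstream>

bool exists_test(const std::string& name)
{
    std::ifstream f(name.c_str());
    return f.good();    // true if the file could be opened for reading
}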
Example #19
void backgroundSubstractionDetection(cv::Mat sequence, std::vector<cv::Rect> &detectedPedestrianFiltered, cv::Ptr<cv::BackgroundSubtractor> &pMOG2, trackingOption &tracking)
{
    int threshold = 150;
    cv::Mat mask;
    cv::Mat sequenceGrayDiff;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    std::vector<std::vector<cv::Point> > contours_poly;
    std::vector<cv::Rect> detectedPedestrian;

    pMOG2->apply(sequence,sequenceGrayDiff);


    cv::threshold(sequenceGrayDiff, mask, threshold, 255, cv::THRESH_BINARY);

    cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(6,6)));
    cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(25,55)));
    cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,6)));


    /*
    cv::Mat dist;
    cv::distanceTransform(mask, dist, CV_DIST_L2, 3);
    cv::normalize(dist, dist, 0, 1., cv::NORM_MINMAX);
    cv::threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
    cv::imshow("temp", dist);
    */

    cv::findContours(mask, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0));

    contours_poly.resize(contours.size());
    detectedPedestrian.resize(contours.size());

    for(size_t j=0;j<contours.size();j++)
    {
        cv::approxPolyDP(cv::Mat(contours[j]), contours_poly[j], 3, true);
        detectedPedestrian[j] = cv::boundingRect(cv::Mat(contours_poly[j]));


        //test
        /*
        double pix = 30;
        if(detectedPedestrian[j].x >= pix)
            detectedPedestrian[j].x -= pix;
        else
            detectedPedestrian[j].x = 0;
        if((detectedPedestrian[j].x+detectedPedestrian[j].width) <= (sequence.cols-pix))
            detectedPedestrian[j].width += pix;
        else
            detectedPedestrian[j].width = sequence.cols - detectedPedestrian[j].x;
        if(detectedPedestrian[j].y >= pix)
            detectedPedestrian[j].y -= pix;
        else
            detectedPedestrian[j].y = 0;
        if((detectedPedestrian[j].y+detectedPedestrian[j].height) <= (sequence.rows-pix))
            detectedPedestrian[j].height += pix;
        else
            detectedPedestrian[j].height = sequence.rows - detectedPedestrian[j].y;
        */
    }

    if(detectedPedestrian.size() != 0)
    {
        tracking = GOOD_FEATURES_TO_TRACK;
        detectedPedestrianFiltered.clear();
        detectedPedestrianFiltered.resize(detectedPedestrian.size());
        detectedPedestrianFiltered = detectedPedestrian;
    }
    else
        tracking = NOTHING_TO_TRACK;

}
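
/*****************************************************************************
 trackingOption is used above (GOOD_FEATURES_TO_TRACK / NOTHING_TO_TRACK) but its
 definition is not part of this listing. A minimal sketch of what the enum
 presumably looks like; the real project may define additional enumerators.
 */
enum trackingOption
{
    NOTHING_TO_TRACK,
    GOOD_FEATURES_TO_TRACK
};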
/*****************************************************************************
 // This applies a brute-force match without a trained data structure.
 // It also calculates the two nearest neighbors.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int brute_force_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Found " << descriptorQuery.size() << " feature descriptors in the image."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with all descriptors in the database
    // with k-nearest neighbors with k=2
    
    int max_removed = INT_MAX;
    int max_id = -1;
    
    for(int i=0; i<_descriptorsRefDB.size(); i++)
    {
        std::vector< std::vector< cv::DMatch> > matches_temp1;
        
        // Here we match all query descriptors against all db descriptors and try to find
        // matching descriptors
        _brute_force_matcher.knnMatch( descriptorQuery, _descriptorsRefDB[i],  matches_temp1, 2);
        
        
        ///////////////////////////////////////////////////////
        // 3. Refinement; Ratio test
        // The ratio test only accept matches which are clear without ambiguity.
        // The best hit must be closer to the query descriptors than the second hit.
        int removed = ratioTest(matches_temp1);
        
        
        
        // We only accept the match with the highest number of hits / the vector with the minimum number of removed features
        int num_matches = matches_temp1.size();
        if(removed < max_removed)
        {
            max_removed = removed;
            max_id = i;
            matches1.clear();
            matches1 = matches_temp1;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Feature map number " << max_id << " has the highest hit with "<< matches1.size() -  max_removed << " descriptors." << std::endl;
#endif
    
    
    std::vector< std::vector< cv::DMatch> > matches_temp2;
    
    // Here we match the descriptors of the best reference image against the
    // query descriptors (the reverse matching direction)
    _brute_force_matcher.knnMatch(_descriptorsRefDB[max_id],  descriptorQuery,  matches_temp2, 2);
    
    // The ratio test only accept matches which are clear without ambiguity.
    // The best hit must be closer to the query descriptors than the second hit.
    int removed = ratioTest(matches_temp2);
    
    
    
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches1, matches_temp2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_id], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_id]._ref_image , _keypointsRefDB[max_id], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_id]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    return max_id;
    
}
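
/*****************************************************************************
 ratioTest() is called by brute_force_match() and knn_match() but is not part of
 this listing. A minimal sketch of Lowe's ratio test under the convention used
 above: ambiguous matches are cleared in place and the number of removed matches
 is returned. The 0.8 ratio is an assumed default, not a value taken from the
 original code.
 */
int ratioTest(std::vector< std::vector<cv::DMatch> >& matches)
{
    int removed = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        // a match needs two neighbors to be tested at all
        if (matches[i].size() < 2)
        {
            matches[i].clear();
            removed++;
            continue;
        }

        // reject the match if the best hit is not clearly closer than the second hit
        if (matches[i][0].distance / matches[i][1].distance > 0.8f)
        {
            matches[i].clear();
            removed++;
        }
    }
    return removed;
}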
Example #21
bool CvCapture_OpenNI::setCommonProperty( int propIdx, double propValue )
{
    bool isSet = false;

    switch( propIdx )
    {
    // There is a set of properties that correspond to the depth generator by default
    // (if they are passed without a particular generator flag).
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        isSet = setDepthGeneratorProperty( propIdx, propValue );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        if( propValue && depthGenerator.IsValid() && imageGenerator.IsValid() )
        {
            // start synchronization
            if( approxSyncGrabber.empty() )
            {
                approxSyncGrabber = new ApproximateSyncGrabber( context, depthGenerator, imageGenerator, maxBufferSize, isCircleBuffer, maxTimeDuration );
            }
            else
            {
                approxSyncGrabber->finish();

                // update params
                approxSyncGrabber->setMaxBufferSize(maxBufferSize);
                approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
                approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
            }
            approxSyncGrabber->start();
        }
        else if( !propValue && !approxSyncGrabber.empty() )
        {
            // finish synchronization
            approxSyncGrabber->finish();
        }
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        maxBufferSize = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxBufferSize(maxBufferSize);
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        isCircleBuffer = cvRound(propValue) != 0;
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        maxTimeDuration = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
        break;
    default:
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
/*****************************************************************************
 // The knn matching with k = 2
 // This code performs the matching and the refinement.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int knn_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with the database descriptors.
    // with k-nearest neighbors with k=2
    _matcher.knnMatch(descriptorQuery , matches1, 2);
    
#ifdef DEBUG_OUT
    std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 3 Filter the matches.
    // Accept only matches (knn with k=2) which belong to one image.
    // The database tree within _matcher contains descriptors of all input images.
    // We expect that both nearest neighbors must belong to one image.
    // Otherwise we can remove the result.
    // Along with this, we count which reference image has the highest number of matches.
    // At this time, we consider this image as the searched image.
    
    // we init the variable hit with 0
    std::vector<int> hits(_num_ref_images);
    for (int i=0; i<_num_ref_images; i++)
    {
        hits[i] = 0;
    }
    
    // the loop runs through all matches and compares the image index
    // imgIdx. They must be equal, otherwise the matches belong to two
    // different reference images.
    for (int i=0; i<matches1.size(); i++)
    {
        // The comparison.
        if(matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
        {
            // we keep it
            matches_opt1.push_back(matches1[i]);
            // and count a hit
            hits[matches1[i].at(0).imgIdx]++;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
    
    // Now we search for the highest number of hits in our hit array
    // The variable max_idx keeps the image id.
    // The variable max_value the amount of hits.
    int max_idx = -1;
    int max_value = 0;
    for (int i=0; i<_num_ref_images; i++)
    {
#ifdef DEBUG_OUT
        std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
        if(hits[i]  > max_value)
        {
            max_value = hits[i];
            max_idx = i;
        }
    }
    
    
    
    ///////////////////////////////////////////////////////
    // 4. The cross-match
    // At this time, we test the database against the query descriptors.
    // The variable max_idx stores the reference image id. Thus, we test only
    // the descriptors that belong to max_idx against the query descriptors.
    _matcher.knnMatch(_descriptorsRefDB[max_idx], descriptorQuery, matches2, 2);
    
    
    ///////////////////////////////////////////////////////
    // 5. Refinement; Ratio test
    // The ratio test only accept matches which are clear without ambiguity.
    // The best hit must be closer to the query descriptors than the second hit.
    int removed = ratioTest(matches_opt1);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    removed = ratioTest(matches2);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches_opt1, matches2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_idx], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_idx]._ref_image , _keypointsRefDB[max_idx], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_idx]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    
    return max_idx;
    
}
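
/*****************************************************************************
 symmetryTest() is called above but is not included in this listing. A minimal
 sketch, assuming the cleared-vector convention used by the ratio test sketch: a
 match is kept only if the same (query, train) keypoint pair is found in both
 matching directions.
 */
void symmetryTest(const std::vector< std::vector<cv::DMatch> >& matches1,
                  const std::vector< std::vector<cv::DMatch> >& matches2,
                  std::vector<cv::DMatch>& symMatches)
{
    symMatches.clear();
    for (size_t i = 0; i < matches1.size(); i++)
    {
        if (matches1[i].empty()) continue;
        for (size_t j = 0; j < matches2.size(); j++)
        {
            if (matches2[j].empty()) continue;

            // the forward query index must equal the backward train index and vice versa
            if (matches1[i][0].queryIdx == matches2[j][0].trainIdx &&
                matches1[i][0].trainIdx == matches2[j][0].queryIdx)
            {
                symMatches.push_back(matches1[i][0]);
                break;
            }
        }
    }
}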
Example #23
File: node.cpp  Project: trantu/fu_tools
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
    cv::Ptr<cv::FeatureDetector> detector,
    cv::Ptr<cv::DescriptorExtractor> extractor,
    cv::Ptr<cv::DescriptorMatcher> matcher,
    const sensor_msgs::PointCloud2ConstPtr point_cloud,
    const cv::Mat& detection_mask)
: id_(0), 
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
  gicp_initialized = false;
#endif
  std::clock_t starttime=std::clock();

#ifdef USE_SIFT_GPU
  SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
  float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
  if (descriptors == NULL) {
    ROS_FATAL("Can't run SiftGPU");
  }
#else
  ROS_FATAL_COND(detector.empty(), "No valid detector!");
  detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif

  ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );

  /*
    if (id_  == 0)
        cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
    else{
   */
  cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
  //   cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
  //} */

  // get pcl::PointCloud to extract depth values at pixel positions
  std::clock_t starttime5=std::clock();
  // TODO: If batch sending/saving of clouds were removed, the point cloud wouldn't have to be saved,
  // which would slim down the memory requirements
  pcl::fromROSMsg(*point_cloud,pc_col);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

  // project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  // removes also unused descriptors from the descriptors matrix
  // build descriptor matrix
  projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec

  if (descriptors != NULL) delete descriptors;

#else
  projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif

  // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
  std::clock_t starttime4=std::clock();
  createGICPStructures(); 
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif

  std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
//  ROS_INFO("Use extractor");
  //cv::Mat topleft, topright;
  //topleft = visual.colRange(0,visual.cols/2+50);
  //topright= visual.colRange(visual.cols/2+50, visual.cols-1);
	//std::vector<cv::KeyPoint> kp1, kp2; 
  //extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information 
  extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
#endif
  assert(feature_locations_2d_.size() == feature_locations_3d_.size());
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );

  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");
}
Example #24
/*
generate the data needed to train a codebook/vocabulary for bag-of-words methods
*/
int generateVocabTrainData(std::string trainPath,
						   std::string vocabTrainDataPath,
						   cv::Ptr<cv::FeatureDetector> &detector,
						   cv::Ptr<cv::DescriptorExtractor> &extractor)
{

	//Do not overwrite any files
	std::ifstream checker;
	checker.open(vocabTrainDataPath.c_str());
	if(checker.is_open()) {	
		std::cerr << vocabTrainDataPath << ": Training Data already present" <<
			std::endl;
		checker.close();
		return -1;
	}

	//load training movie
	cv::VideoCapture movie;
	movie.open(trainPath);
	if (!movie.isOpened()) {
		std::cerr << trainPath << ": training movie not found" << std::endl;
		return -1;
	}

	//extract data
	std::cout << "Extracting Descriptors" << std::endl;
	cv::Mat vocabTrainData;
	cv::Mat frame, descs, feats;
	std::vector<cv::KeyPoint> kpts;
	
	std::cout.setf(std::ios_base::fixed); 
	std::cout.precision(0);
	
	while(movie.read(frame)) {

		//detect & extract features
		detector->detect(frame, kpts);
		extractor->compute(frame, kpts, descs);

		//add all descriptors to the training data 
		vocabTrainData.push_back(descs);

		//show progress
		cv::drawKeypoints(frame, kpts, feats);
		cv::imshow("Training Data", feats);
		
		std::cout << 100.0*(movie.get(CV_CAP_PROP_POS_FRAMES) / 
			movie.get(CV_CAP_PROP_FRAME_COUNT)) << "%. " << 
			vocabTrainData.rows << " descriptors         \r";
		fflush(stdout); 
		
		if(cv::waitKey(5) == 27) {
			cv::destroyWindow("Training Data");
			std::cout << std::endl;
			return -1;
		}

	}
	cv::destroyWindow("Training Data");
	std::cout << "Done: " << vocabTrainData.rows << " Descriptors" << std::endl;

	//save the training data
	cv::FileStorage fs;	
	fs.open(vocabTrainDataPath, cv::FileStorage::WRITE);
	fs << "VocabTrainData" << vocabTrainData;
	fs.release();

	return 0;
}
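
/*
A minimal sketch of the intermediate vocabulary-building step that connects
generateVocabTrainData() above with generateBOWImageDescs() further below. It
assumes OpenCV's cv::BOWKMeansTrainer (features2d module) and an arbitrary
cluster count; the original FabMap pipeline may use a different trainer (e.g.,
a modified-sequential clustering scheme), so this is only illustrative.
*/
int trainVocabulary(std::string vocabTrainDataPath,
                    std::string vocabPath,
                    int clusterCount = 10000)
{
    // load the descriptors written by generateVocabTrainData()
    cv::FileStorage fs(vocabTrainDataPath, cv::FileStorage::READ);
    cv::Mat vocabTrainData;
    fs["VocabTrainData"] >> vocabTrainData;
    fs.release();
    if (vocabTrainData.empty()) {
        std::cerr << vocabTrainDataPath << ": no training data found" << std::endl;
        return -1;
    }

    // cluster the descriptors into visual words (k-means)
    cv::BOWKMeansTrainer trainer(clusterCount);
    trainer.add(vocabTrainData);
    cv::Mat vocab = trainer.cluster();

    // save the vocabulary under the key expected by generateBOWImageDescs()
    fs.open(vocabPath, cv::FileStorage::WRITE);
    fs << "Vocabulary" << vocab;
    fs.release();

    return 0;
}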
Example #25
void cv::gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>& source)
{
    CV_Assert( !source.empty() );
    close();
    impl_.reset(new Impl(source));
}
Example #26
/*
generate FabMap bag-of-words data : an image descriptor for each frame
*/
int generateBOWImageDescs(std::string dataPath,
							std::string bowImageDescPath,
							std::string vocabPath,
							cv::Ptr<cv::FeatureDetector> &detector,
							cv::Ptr<cv::DescriptorExtractor> &extractor,
							int minWords)
{
	
	cv::FileStorage fs;	

	//ensure not overwriting training data
	std::ifstream checker;
	checker.open(bowImageDescPath.c_str());
	if(checker.is_open()) {	
		std::cerr << bowImageDescPath << ": FabMap Training/Testing Data "
			"already present" << std::endl;
		checker.close();
		return -1;
	}

	//load vocabulary
	std::cout << "Loading Vocabulary" << std::endl;
	fs.open(vocabPath, cv::FileStorage::READ);
	cv::Mat vocab;
	fs["Vocabulary"] >> vocab;
	if (vocab.empty()) {
		std::cerr << vocabPath << ": Vocabulary not found" << std::endl;
		return -1;
	}
	fs.release();

	//use a FLANN matcher to generate bag-of-words representations
	cv::Ptr<cv::DescriptorMatcher> matcher = 
		cv::DescriptorMatcher::create("FlannBased");
	cv::BOWImgDescriptorExtractor bide(extractor, matcher);
	bide.setVocabulary(vocab);

	//load movie
	cv::VideoCapture movie;
	movie.open(dataPath);

	if(!movie.isOpened()) {
		std::cerr << dataPath << ": movie not found" << std::endl;
		return -1;
	}

	//extract image descriptors
	cv::Mat fabmapTrainData;
	std::cout << "Extracting Bag-of-words Image Descriptors" << std::endl;
	std::cout.setf(std::ios_base::fixed);
	std::cout.precision(0);

	std::ofstream maskw;
	
	if(minWords) {
		maskw.open(std::string(bowImageDescPath + "mask.txt").c_str());
	}

	cv::Mat frame, bow;
	std::vector<cv::KeyPoint> kpts;
	
	while(movie.read(frame)) {
		detector->detect(frame, kpts);
		bide.compute(frame, kpts, bow);

		if(minWords) {
			//writing a mask file
			if(cv::countNonZero(bow) < minWords) {
				//frame masked
				maskw << "0" << std::endl;
			} else {
				//frame accepted
				maskw << "1" << std::endl;
				fabmapTrainData.push_back(bow);
			}
		} else {
			fabmapTrainData.push_back(bow);
		}
		
		std::cout << 100.0 * (movie.get(CV_CAP_PROP_POS_FRAMES) / 
			movie.get(CV_CAP_PROP_FRAME_COUNT)) << "%    \r";
		fflush(stdout); 
	}
	std::cout << "Done                                       " << std::endl;
	
	movie.release();

	//save training data
	fs.open(bowImageDescPath, cv::FileStorage::WRITE);
	fs << "BOWImageDescs" << fabmapTrainData;
	fs.release();

	return 0;	
}
 static void callback_saturateColors(int, void*)
 {
     retina->setColorSaturation(true, (float)colorSaturationFactor);
 }
  void disparityCallback(const stereo_msgs::DisparityImageConstPtr& msg)
  {
    ROS_DEBUG("Pub Threshold:%f ", LineMOD_Detector::pub_threshold);
    if (!LineMOD_Detector::got_color)
    {
      return;
    }
    bool show_match_result = true;
    cv_bridge::CvImagePtr disp_ptr;
    try
    {
      disp_ptr = cv_bridge::toCvCopy(msg->image, "");
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("disp cv_bridge exception: %s", e.what());
      return;
    }

    float f = msg->f;
    float T = msg->T;
    cv::Mat depth_img = (f*T*1000)/(disp_ptr->image).clone();
    depth_img.convertTo(depth_img, CV_16U);
    LineMOD_Detector::sources.push_back(LineMOD_Detector::color_img);
    LineMOD_Detector::sources.push_back(depth_img);

      // Perform matching
      std::vector<cv::linemod::Match> matches;
      std::vector<cv::String> class_ids;
      std::vector<cv::Mat> quantized_images;

      LineMOD_Detector::detector->match(sources, (float)LineMOD_Detector::matching_threshold, matches, class_ids, quantized_images);

      LineMOD_Detector::num_classes = detector->numClasses();
      ROS_DEBUG("Num Classes: %u", LineMOD_Detector::num_classes);
      int classes_visited = 0;
      std::set<std::string> visited;

      ROS_DEBUG("Matches size: %u", (int)matches.size());
      for (int i = 0; (i < (int)matches.size()) && (classes_visited < LineMOD_Detector::num_classes); ++i)
      {
        cv::linemod::Match m = matches[i];
        ROS_DEBUG("Matching count: %u", i);

        if (visited.insert(m.class_id).second)
        {
          ++classes_visited;

          if (show_match_result)
          {
            ROS_DEBUG("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                   m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
            printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                   m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
          }

          // Draw matching template
          const std::vector<cv::linemod::Template>& templates = LineMOD_Detector::detector->getTemplates(m.class_id, m.template_id);
          drawResponse(templates, LineMOD_Detector::num_modalities, LineMOD_Detector::display, cv::Point(m.x, m.y), LineMOD_Detector::detector->getT(0));
          if (m.similarity > LineMOD_Detector::pub_threshold)
          {
            LineMOD_Detector::publishPoint(templates, m, depth_img, msg->header);
          }

        }
      }

      LineMOD_Detector::sources.clear();
  }
 static void callBack_updateRetinaParams(int, void*)
 {
     retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
 }
 void setThreshold( double threshold1, double threshold2, double threshold3 )
 {
     eigenfaceRecognizor->set("threshold", threshold1);
     fisherfaceRecognizor->set("threshold", threshold2);
     LBPHRecognizor->set("threshold", threshold3);
 }
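
 /*
  A minimal sketch of how the three recognizers used by setThreshold() might be
  created, assuming the OpenCV 2.4 contrib factory functions. The member names
  mirror those used above; everything else is an assumption about the
  surrounding class.
  */
 #include <opencv2/contrib/contrib.hpp>   // provides cv::FaceRecognizer and its factories (OpenCV 2.4)

 cv::Ptr<cv::FaceRecognizer> eigenfaceRecognizor  = cv::createEigenFaceRecognizer();
 cv::Ptr<cv::FaceRecognizer> fisherfaceRecognizor = cv::createFisherFaceRecognizer();
 cv::Ptr<cv::FaceRecognizer> LBPHRecognizor       = cv::createLBPHFaceRecognizer();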