Example #1
bool CvCapture_OpenNI::setCommonProperty( int propIdx, double propValue )
{
    bool isSet = false;

    switch( propIdx )
    {
    // There is a set of properties that are handled by the depth generator by default
    // (i.e. when they are passed without a particular generator flag).
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        isSet = setDepthGeneratorProperty( propIdx, propValue );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        if( propValue && depthGenerator.IsValid() && imageGenerator.IsValid() )
        {
            // start synchronization
            if( approxSyncGrabber.empty() )
            {
                approxSyncGrabber = new ApproximateSyncGrabber( context, depthGenerator, imageGenerator, maxBufferSize, isCircleBuffer, maxTimeDuration );
            }
            else
            {
                approxSyncGrabber->finish();

                // update params
                approxSyncGrabber->setMaxBufferSize(maxBufferSize);
                approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
                approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
            }
            approxSyncGrabber->start();
        }
        else if( !propValue && !approxSyncGrabber.empty() )
        {
            // finish synchronization
            approxSyncGrabber->finish();
        }
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        maxBufferSize = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxBufferSize(maxBufferSize);
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        isCircleBuffer = cvRound(propValue) != 0;
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        maxTimeDuration = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
        break;
    default:
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
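The properties handled above are normally reached through cv::VideoCapture rather than by calling setCommonProperty() directly. A minimal usage sketch, assuming an OpenNI-enabled OpenCV 2.x build (the buffer size value is just a placeholder):

#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture capture( CV_CAP_OPENNI );                 // open the depth sensor
    if( !capture.isOpened() )
        return 1;

    // Without a generator flag OR-ed into the property id, both calls
    // are routed to CvCapture_OpenNI::setCommonProperty().
    capture.set( CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE, 4 );      // bound the sync buffer
    capture.set( CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC, 1 );    // start approximate sync
    return 0;
}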
Example #2
template <typename TMat>
inline void quality_test(cv::Ptr<quality::QualityBase> ptr, const TMat& cmp, const Scalar& expected, const std::size_t quality_maps_expected = 1, const bool empty_expected = false )
{
    std::vector<cv::Mat> qMats = {};
    ptr->getQualityMaps(qMats);
    EXPECT_TRUE( qMats.empty());

    quality_expect_near( expected, ptr->compute(cmp));

    if (empty_expected)
        EXPECT_TRUE(ptr->empty());
    else
        EXPECT_FALSE(ptr->empty());

    ptr->getQualityMaps(qMats);

    EXPECT_EQ( qMats.size(), quality_maps_expected);
    for (auto& qm : qMats)
    {
        EXPECT_GT(qm.rows, 0);
        EXPECT_GT(qm.cols, 0);
    }

    ptr->clear();
    EXPECT_TRUE(ptr->empty());
}
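A sketch of how the quality_test() helper above might be driven from a test body, assuming the opencv_contrib quality module is available; the image file names and the expected MSE value are placeholders only:

TEST(quality_example, mse_usage_sketch)
{
    // Hypothetical fixtures: a reference image and a distorted comparison image.
    cv::Mat ref = cv::imread("ref.png", cv::IMREAD_GRAYSCALE);
    cv::Mat cmp = cv::imread("cmp.png", cv::IMREAD_GRAYSCALE);

    // QualityMSE::create() stores the reference; the helper then calls compute(cmp),
    // checks the returned score against the expected Scalar and inspects the quality map.
    quality_test(cv::quality::QualityMSE::create(ref), cmp, cv::Scalar(42.0) /* placeholder */);
}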
    DetectorAgregator(cv::Ptr<CascadeDetectorAdapter>& _mainDetector, cv::Ptr<CascadeDetectorAdapter>& _trackingDetector):
            mainDetector(_mainDetector),
            trackingDetector(_trackingDetector)
    {
        CV_Assert(!_mainDetector.empty());
        CV_Assert(!_trackingDetector.empty());

        DetectionBasedTracker::Parameters DetectorParams;
        tracker = new DetectionBasedTracker(mainDetector.ptr<DetectionBasedTracker::IDetector>(), trackingDetector.ptr<DetectionBasedTracker::IDetector>(), DetectorParams);
    }
 CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
         IDetector(),
         Detector(detector)
 {
     LOGD("CascadeDetectorAdapter::Detect::Detect");
     CV_Assert(!detector.empty());
 }
cv::DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector)
    :detectionBasedTracker(_detectionBasedTracker),
    cascadeInThread(),
    isObjectDetectingReady(false),
    shouldObjectDetectingResultsBeForgot(false),
    stateThread(STATE_THREAD_STOPPED),
    timeWhenDetectingThreadStartedWork(-1)
{
    CV_Assert(!_detector.empty());

    cascadeInThread = _detector;

    int res=0;
    res=pthread_mutex_init(&mutex, NULL);//TODO: should be attributes?
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_mutex_init(&mutex, NULL) is %d", res);
        throw(std::exception());
    }
    res=pthread_cond_init (&objectDetectorRun, NULL);
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorRun,, NULL) is %d", res);
        pthread_mutex_destroy(&mutex);
        throw(std::exception());
    }
    res=pthread_cond_init (&objectDetectorThreadStartStop, NULL);
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorThreadStartStop,, NULL) is %d", res);
        pthread_cond_destroy(&objectDetectorRun);
        pthread_mutex_destroy(&mutex);
        throw(std::exception());
    }
}
Example #6
bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
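From the user side, grabFrame() is what cv::VideoCapture::grab() ends up calling for an OpenNI capture; the individual streams are then fetched with retrieve(). A minimal sketch, assuming an OpenNI-enabled OpenCV 2.x build:

cv::VideoCapture capture( CV_CAP_OPENNI );
cv::Mat depthMap, bgrImage;

while( capture.grab() )                                      // CvCapture_OpenNI::grabFrame()
{
    capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP );   // CV_16UC1, depth in mm
    capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE );   // CV_8UC3 color image
    // ... process depthMap / bgrImage ...
}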
void MapperGradShift::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Calculate parameters using least squares
    Matx<double, 2, 2> A;
    Vec<double, 2> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.

    A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
    A(0, 1) = sum(sum(gradx.mul(grady)))[0];
    A(1, 1) = sum(sum(grady.mul(grady)))[0];
    A(1, 0) = A(0, 1);

    b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(1) = -sum(sum(imgDiff.mul(grady)))[0];

    // Calculate shift. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 2> shift = A.inv(DECOMP_CHOLESKY)*b;

    if(res.empty()) {
        res = new MapShift(shift);
    } else {
        MapShift newTr(shift);
        res->compose(newTr);
    }
}
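A minimal sketch of how the mapper above might be used, assuming the cv::reg interface shown here (appropriate opencv2/reg headers included, and two overlapping floating-point grayscale images img1 and img2 already loaded):

cv::reg::MapperGradShift mapper;
cv::Ptr<cv::reg::Map> map;              // left empty so calculate() allocates a MapShift
mapper.calculate(img1, img2, map);      // estimate the shift of img2 relative to img1

cv::Mat aligned;
map->inverseWarp(img2, aligned);        // bring img2 into img1's reference frame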
    std::vector<bbox_t> tracking_flow(cv::Mat new_dst_mat, bool check_error = true)
    {
        if (sync_PyrLKOpticalFlow.empty()) {
            std::cout << "sync_PyrLKOpticalFlow isn't initialized \n";
            return cur_bbox_vec;
        }

        cv::cvtColor(new_dst_mat, dst_grey, CV_BGR2GRAY, 1);

        if (src_grey.rows != dst_grey.rows || src_grey.cols != dst_grey.cols) {
            src_grey = dst_grey.clone();
            return cur_bbox_vec;
        }

        if (prev_pts_flow.cols < 1) {
            return cur_bbox_vec;
        }

        ////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);    // OpenCV 2.4.x
        sync_PyrLKOpticalFlow->calc(src_grey, dst_grey, prev_pts_flow, cur_pts_flow, status, err);    // OpenCV 3.x

        dst_grey.copyTo(src_grey);

        std::vector<bbox_t> result_bbox_vec;

        if (err.rows == cur_bbox_vec.size() && status.rows == cur_bbox_vec.size())
        {
            for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
            {
                cv::Point2f cur_key_pt = cur_pts_flow.at<cv::Point2f>(0, i);
                cv::Point2f prev_key_pt = prev_pts_flow.at<cv::Point2f>(0, i);

                float moved_x = cur_key_pt.x - prev_key_pt.x;
                float moved_y = cur_key_pt.y - prev_key_pt.y;

                if (abs(moved_x) < 100 && abs(moved_y) < 100 && good_bbox_vec_flags[i])
                    if (err.at<float>(0, i) < flow_error && status.at<unsigned char>(0, i) != 0 &&
                        ((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
                    {
                        cur_bbox_vec[i].x += moved_x + 0.5;
                        cur_bbox_vec[i].y += moved_y + 0.5;
                        result_bbox_vec.push_back(cur_bbox_vec[i]);
                    }
                    else good_bbox_vec_flags[i] = false;
                else good_bbox_vec_flags[i] = false;

                //if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
            }
        }

        prev_pts_flow = cur_pts_flow.clone();

        return result_bbox_vec;
    }
Example #9
Node::Node(ros::NodeHandle* nh,
        const cv::Mat& visual, const cv::Mat& depth,
        image_geometry::PinholeCameraModel cam_model,
        cv::Ptr<cv::FeatureDetector> detector,
        cv::Ptr<cv::DescriptorExtractor> extractor,
        cv::Ptr<cv::DescriptorMatcher> matcher,
        const sensor_msgs::PointCloud2ConstPtr& point_cloud,
        unsigned int msg_id,
        unsigned int id,
        const cv::Mat& detection_mask):
        nh_(nh),
        msg_id_(msg_id),
        id_(id),
        cloudMessage_(*point_cloud),
        cam_model_(cam_model),
        matcher_(matcher)
{
    std::clock_t starttime=std::clock();

    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
    ROS_INFO("Found %d Keypoints", (int)feature_locations_2d_.size());

    cloud_pub = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
    cloud_pub2 = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/my_clouds",20);

    // get pcl::PointCloud to extract depth values at pixel positions
    std::clock_t starttime5=std::clock();
    // TODO: This takes 0.1 seconds and is not strictly necessary
    //pcl::fromROSMsg(*point_cloud,pc);
    pcl::fromROSMsg(*point_cloud,pc_col);
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

    // project pixels to 3dPositions and create search structures for the gicp
    projectTo3D(depth, feature_locations_2d_, feature_locations_3d_,pc_col); //takes less than 0.01 sec

    std::clock_t starttime4=std::clock();
    // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );

    std::clock_t starttime2=std::clock();
    extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
    flannIndex = NULL;

    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");


}
Example #10
void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
{
    const std::string inputVideoName = cvtest::TS::ptr()->get_data_path() + "car.avi";
    const int scale = 2;
    const int iterations = 100;
    const int temporalAreaRadius = 2;

    ASSERT_FALSE( superRes.empty() );

    const int btvKernelSize = superRes->getInt("btvKernelSize");

    superRes->set("scale", scale);
    superRes->set("iterations", iterations);
    superRes->set("temporalAreaRadius", temporalAreaRadius);

    cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
    cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));

    // skip first frame
    cv::Mat frame;

    lowResSource->nextFrame(frame);
    goldSource->nextFrame(frame);

    cv::Rect inner(btvKernelSize, btvKernelSize, frame.cols - 2 * btvKernelSize, frame.rows - 2 * btvKernelSize);

    superRes->setInput(lowResSource);

    double srAvgMSSIM = 0.0;
    const int count = 10;

    cv::Mat goldFrame, superResFrame;
    for (int i = 0; i < count; ++i)
    {
        goldSource->nextFrame(goldFrame);
        ASSERT_FALSE( goldFrame.empty() );

        superRes->nextFrame(superResFrame);
        ASSERT_FALSE( superResFrame.empty() );

        const double srMSSIM = MSSIM(goldFrame(inner), superResFrame);

        srAvgMSSIM += srMSSIM;
    }

    srAvgMSSIM /= count;

    EXPECT_GE( srAvgMSSIM, 0.5 );
}
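RunTest() is handed an already-constructed algorithm; outside the test the object would typically be built as below. A sketch assuming the OpenCV 2.4-era superres module with string-keyed parameters, as used by RunTest() above; the input file name is a placeholder:

cv::Ptr<cv::superres::SuperResolution> superRes = cv::superres::createSuperResolution_BTVL1();
superRes->set("scale", 2);                                               // same string-keyed API as above
superRes->setInput(cv::superres::createFrameSource_Video("input.avi"));

cv::Mat result;
superRes->nextFrame(result);                                             // first super-resolved frame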
Example #11
double CvCapture_OpenNI::getCommonProperty( int propIdx )
{
    double propValue = 0;

    switch( propIdx )
    {
    // There is a set of properties that are handled by the depth generator by default
    // (i.e. when they are passed without a particular generator flag). There are two reasons for this:
    // 1) We can assume that the depth generator is the main one for a depth sensor.
    // 2) In the initial versions of the OpenNI integration into OpenCV the value of
    //    the flag CV_CAP_OPENNI_DEPTH_GENERATOR was 0 (it isn't zero now).
    case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT :
    case CV_CAP_PROP_FRAME_WIDTH :
    case CV_CAP_PROP_FRAME_HEIGHT :
    case CV_CAP_PROP_FPS :
    case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
    case CV_CAP_PROP_OPENNI_BASELINE :
    case CV_CAP_PROP_OPENNI_FOCAL_LENGTH :
    case CV_CAP_PROP_OPENNI_REGISTRATION :
        propValue = getDepthGeneratorProperty( propIdx );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        propValue = !approxSyncGrabber.empty() && approxSyncGrabber->isRun() ? 1. : 0.;
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        propValue = maxBufferSize;
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        propValue = isCircleBuffer ? 1. : 0.;
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        propValue = maxTimeDuration;
        break;
    default :
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for getting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return propValue;
}
cv::Mat NFringeStructuredLight::WrapPhase( vector<cv::Mat> fringeImages, cv::Ptr<cv::FilterEngine> filter )
{
  Utils::AssertOrThrowIfFalse(fringeImages.size() == m_numberOfFringes, 
	"Invalid number of fringes passed into phase wrapper");

  // Should be the same size as our fringe images 
  // and floating point precision for decimal phase values
  cv::Mat sine(fringeImages[0].size(), CV_32F, 0.0f);
  cv::Mat cosine(fringeImages[0].size(), CV_32F, 0.0f);
  cv::Mat phase(fringeImages[0].size(), CV_32F, 0.0f);

  for(int row = 0; row < phase.rows; ++row)
  {
	for(int col = 0; col < phase.cols; ++col)
	{
	  for(int fringe = 0; fringe < m_numberOfFringes; ++fringe)
	  {
		sine.at<float>(row, col) += ( float( fringeImages[fringe].at<uchar>(row, col) ) / 255.0 ) * sin(2.0 * M_PI * float(fringe) / float(m_numberOfFringes));
		cosine.at<float>(row, col) += ( float( fringeImages[fringe].at<uchar>(row, col) ) / 255.0 ) * cos(2.0 * M_PI * float(fringe) / float(m_numberOfFringes));
	  }
	}
  }

  // Filter out noise in the sine and cosine components
  if( !filter.empty( ) )
  {
	filter->apply( sine, sine );
	filter->apply( cosine, cosine );
  }

  // Now perform phase wrapping
  for(int row = 0; row < phase.rows; ++row)
  {
	for(int col = 0; col < phase.cols; ++col)
	{
	  // This is negative so that our phase gradient increases from 0 -> rows or 0 -> cols
	  phase.at<float>(row, col) = -atan2( sine.at<float>( row, col ), cosine.at<float>( row, col ) );
	}
  }
  return phase;
}
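A usage sketch for the phase wrapper above, assuming the OpenCV 2.4-era FilterEngine API that the signature implies; the fringe file names and the NFringeStructuredLight constructor taking the fringe count are assumptions:

std::vector<cv::Mat> fringes(4);
for(int i = 0; i < (int)fringes.size(); ++i)
    fringes[i] = cv::imread(cv::format("fringe%d.png", i), CV_LOAD_IMAGE_GRAYSCALE);

// Optional Gaussian smoothing of the sine/cosine components before the atan2 step.
cv::Ptr<cv::FilterEngine> gauss = cv::createGaussianFilter(CV_32F, cv::Size(5, 5), 1.0);

NFringeStructuredLight wrapper(fringes.size());             // hypothetical constructor
cv::Mat wrappedPhase = wrapper.WrapPhase(fringes, gauss);   // wrapped phase in radians, (-pi, pi]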
Example #13
int main( int argc, char **argv )
{
	if(argc<4) {
		usage(argc,argv);
		return 1;
	}
	is = helper::createImageSource(argv[1]);
	if(is.empty() || is->done()) {
		loglne("[main] createImageSource failed or no valid imagesource!");
		return -1;
	}
	is->pause(false);
	is->reportInfo();
	is->get(frame);
	imgW = frame.cols; imgH = frame.rows;
	videoFromWebcam = false;
	if( is->classname() == "ImageSource_Camera" ) {
		videoFromWebcam = true;
	}

	loglni("[main] loading K matrix from: "<<argv[2]);
	double K[9];
	std::ifstream kfile(argv[2]);
	for(int i=0; i<9; ++i) kfile >> K[i];
	tracker.loadK(K);
	loglni("[main] K matrix loaded:");
	loglni(helper::PrintMat<>(3,3,K));

	loglni("[main] load template image from: "<<argv[3]);
	tracker.loadTemplate(argv[3]);

	//////////////// TagDetector /////////////////////////////////////////
	int tagid = 0; //default tag16h5
	if(argc>5) tagid = atoi(argv[5]);
	tagFamily = TagFamilyFactory::create(tagid);
	if(tagFamily.empty()) {
		loglne("[main] create TagFamily fail!");
		return -1;
	}
	detector = new TagDetector(tagFamily);
	if(detector.empty()) {
		loglne("[main] create TagDetector fail!");
		return -1;
	}
	Mat temp = imread(argv[3]);
	if( findAprilTag(temp, 0, HI, true) ) {
		namedWindow("template");
		imshow("template", temp);
		iHI = HI.inv();
	} else {
		loglne("[main error] detector did not find any apriltag on template image!");
		return -1;
	}

	//////////////// OSG ////////////////////////////////////////////////
	osg::ref_ptr<osg::Group> root = new osg::Group;

	string scenefilename = (argc>4?argv[4]:("cow.osg"));
	osg::ref_ptr<osg::Node> cow = osgDB::readNodeFile(scenefilename);
	arscene = new helper::ARSceneRoot;
	helper::FixMat<3,double>::Type matK = helper::FixMat<3,double>::ConvertType(K);
	CV2CG::cv2cg(matK,0.01,500,imgW,imgH,*arscene);
	manipMat = new osg::MatrixTransform(osg::Matrix::identity());
	manipMat->addChild(cow);
	manipMat->getOrCreateStateSet()->setMode(GL_NORMALIZE, osg::StateAttribute::ON);
	arscene->addChild(manipMat);

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
	helper::cvmat2osgimage(frame,backgroundImage);
	arvideo = new helper::ARVideoBackground(backgroundImage);
	root->setUpdateCallback(new ARUpdateCallback);

	root->addChild(arvideo);
	root->addChild(arscene);

	viewer.setSceneData(root);
	viewer.addEventHandler(new osgViewer::StatsHandler);
	viewer.addEventHandler(new osgViewer::WindowSizeHandler);
	viewer.addEventHandler(new QuitHandler);

	//start tracking thread
	OpenThreads::Thread::Init();
	TrackThread* thr = new TrackThread;
	thr->start();

	viewer.run();

	delete thr;
	loglni("[main] DONE...exit!");
	return 0;
}
void MapperGradAffine::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());
    CV_DbgAssert(img1.channels() == image2.channels());
    CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Matrices with reference frame coordinates
    Mat grid_r, grid_c;
    grid(img1, grid_r, grid_c);

    // Calculate parameters using least squares
    Matx<double, 6, 6> A;
    Vec<double, 6> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.
    Mat xIx = grid_c.mul(gradx);
    Mat xIy = grid_c.mul(grady);
    Mat yIx = grid_r.mul(gradx);
    Mat yIy = grid_r.mul(grady);
    Mat Ix2 = gradx.mul(gradx);
    Mat Iy2 = grady.mul(grady);
    Mat xy = grid_c.mul(grid_r);
    Mat IxIy = gradx.mul(grady);
    A(0, 0) = sum(sum(sqr(xIx)))[0];
    A(0, 1) = sum(sum(xy.mul(Ix2)))[0];
    A(0, 2) = sum(sum(grid_c.mul(Ix2)))[0];
    A(0, 3) = sum(sum(sqr(grid_c).mul(IxIy)))[0];
    A(0, 4) = sum(sum(xy.mul(IxIy)))[0];
    A(0, 5) = sum(sum(grid_c.mul(IxIy)))[0];
    A(1, 1) = sum(sum(sqr(yIx)))[0];
    A(1, 2) = sum(sum(grid_r.mul(Ix2)))[0];
    A(1, 3) = A(0, 4);
    A(1, 4) = sum(sum(sqr(grid_r).mul(IxIy)))[0];
    A(1, 5) = sum(sum(grid_r.mul(IxIy)))[0];
    A(2, 2) = sum(sum(Ix2))[0];
    A(2, 3) = A(0, 5);
    A(2, 4) = A(1, 5);
    A(2, 5) = sum(sum(IxIy))[0];
    A(3, 3) = sum(sum(sqr(xIy)))[0];
    A(3, 4) = sum(sum(xy.mul(Iy2)))[0];
    A(3, 5) = sum(sum(grid_c.mul(Iy2)))[0];
    A(4, 4) = sum(sum(sqr(yIy)))[0];
    A(4, 5) = sum(sum(grid_r.mul(Iy2)))[0];
    A(5, 5) = sum(sum(Iy2))[0];
    // Lower half values (A is symmetric)
    A(1, 0) = A(0, 1);
    A(2, 0) = A(0, 2);
    A(2, 1) = A(1, 2);
    A(3, 0) = A(0, 3);
    A(3, 1) = A(1, 3);
    A(3, 2) = A(2, 3);
    A(4, 0) = A(0, 4);
    A(4, 1) = A(1, 4);
    A(4, 2) = A(2, 4);
    A(4, 3) = A(3, 4);
    A(5, 0) = A(0, 5);
    A(5, 1) = A(1, 5);
    A(5, 2) = A(2, 5);
    A(5, 3) = A(3, 5);
    A(5, 4) = A(4, 5);

    // Calculation of b
    b(0) = -sum(sum(imgDiff.mul(xIx)))[0];
    b(1) = -sum(sum(imgDiff.mul(yIx)))[0];
    b(2) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(3) = -sum(sum(imgDiff.mul(xIy)))[0];
    b(4) = -sum(sum(imgDiff.mul(yIy)))[0];
    b(5) = -sum(sum(imgDiff.mul(grady)))[0];

    // Calculate affine transformation. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 6> k = A.inv(DECOMP_CHOLESKY)*b;

    Matx<double, 2, 2> linTr(k(0) + 1., k(1), k(3), k(4) + 1.);
    Vec<double, 2> shift(k(2), k(5));
    if(res.empty()) {
        res = Ptr<Map>(new MapAffine(linTr, shift));
    } else {
        MapAffine newTr(linTr, shift);
        res->compose(newTr);
    }
}
 CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
     Detector(detector)
 {
     CV_Assert(!detector.empty());
 }
Example #16
void cv::gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>& source)
{
    CV_Assert( !source.empty() );
    close();
    impl_.reset(new Impl(source));
}
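The overload above takes a pre-built VideoSource; in the common case the reader is simply opened from a file name. A minimal sketch, assuming the OpenCV 2.4-era gpu module (the file name is a placeholder):

cv::gpu::VideoReader_GPU reader("input.mp4");   // decoded on the GPU
cv::gpu::GpuMat frame;
while (reader.read(frame))
{
    // ... process the decoded frame on the GPU ...
}
reader.close();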
void MapperGradEuclid::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());
    CV_DbgAssert(img1.channels() == image2.channels());
    CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Matrices with reference frame coordinates
    Mat grid_r, grid_c;
    grid(img1, grid_r, grid_c);

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Calculate parameters using least squares
    Matx<double, 3, 3> A;
    Vec<double, 3> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.
    Mat xIy_yIx = grid_c.mul(grady);
    xIy_yIx -= grid_r.mul(gradx);

    A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
    A(0, 1) = sum(sum(gradx.mul(grady)))[0];
    A(0, 2) = sum(sum(gradx.mul(xIy_yIx)))[0];
    A(1, 1) = sum(sum(grady.mul(grady)))[0];
    A(1, 2) = sum(sum(grady.mul(xIy_yIx)))[0];
    A(2, 2) = sum(sum(xIy_yIx.mul(xIy_yIx)))[0];
    A(1, 0) = A(0, 1);
    A(2, 0) = A(0, 2);
    A(2, 1) = A(1, 2);

    b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(1) = -sum(sum(imgDiff.mul(grady)))[0];
    b(2) = -sum(sum(imgDiff.mul(xIy_yIx)))[0];

    // Calculate parameters. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 3> k = A.inv(DECOMP_CHOLESKY)*b;

    double cosT = cos(k(2));
    double sinT = sin(k(2));
    Matx<double, 2, 2> linTr(cosT, -sinT, sinT, cosT);
    Vec<double, 2> shift(k(0), k(1));

    if(res.empty()) {
        res = Ptr<Map>(new MapAffine(linTr, shift));
    } else {
        MapAffine newTr(linTr, shift);
        res->compose(newTr);
    }
}
Example #18
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
    cv::Ptr<cv::FeatureDetector> detector,
    cv::Ptr<cv::DescriptorExtractor> extractor,
    cv::Ptr<cv::DescriptorMatcher> matcher,
    const sensor_msgs::PointCloud2ConstPtr point_cloud,
    const cv::Mat& detection_mask)
: id_(0), 
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
  gicp_initialized = false;
#endif
  std::clock_t starttime=std::clock();

#ifdef USE_SIFT_GPU
  SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
  float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
  if (descriptors == NULL) {
    ROS_FATAL("Can't run SiftGPU");
  }
#else
  ROS_FATAL_COND(detector.empty(), "No valid detector!");
  detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif

  ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );

  /*
    if (id_  == 0)
        cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
    else{
   */
  cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
  //   cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
  //} */

  // get pcl::PointCloud to extract depth values at pixel positions
  std::clock_t starttime5=std::clock();
  // TODO: If batch sending/saving of clouds were removed, the point cloud wouldn't have to be saved,
  // which would slim down the memory requirements
  pcl::fromROSMsg(*point_cloud,pc_col);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

  // project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  // removes also unused descriptors from the descriptors matrix
  // build descriptor matrix
  projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec

  if (descriptors != NULL) delete descriptors;

#else
  projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif

  // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
  std::clock_t starttime4=std::clock();
  createGICPStructures(); 
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif

  std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
//  ROS_INFO("Use extractor");
  //cv::Mat topleft, topright;
  //topleft = visual.colRange(0,visual.cols/2+50);
  //topright= visual.colRange(visual.cols/2+50, visual.cols-1);
	//std::vector<cv::KeyPoint> kp1, kp2; 
  //extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information 
  extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
#endif
  assert(feature_locations_2d_.size() == feature_locations_3d_.size());
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );

  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");
}
Example #19
  /*
   * Initializes annotator
   */
  TyErrorId initialize(AnnotatorContext &ctx)
  {
    outInfo("initialize");

    if(ctx.isParameterDefined("keypointDetector"))
    {
      ctx.extractValue("keypointDetector", keypointDetector);
    }
    else
    {
      outError("no keypoint detector provided!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

    if(ctx.isParameterDefined("featureExtractor"))
    {
      ctx.extractValue("featureExtractor", featureExtractor);
    }
    else
    {
      outError("no feature extractor provided!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

    outDebug("creating " << keypointDetector << " key points detector...");
    detector = cv::FeatureDetector::create(keypointDetector);
    if(detector.empty())
    {
      outError("creation failed!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

#if OUT_LEVEL == OUT_LEVEL_DEBUG
    printParams(detector);
#endif
    setupAlgorithm(detector);

    outDebug("creating " << featureExtractor << " feature extractor...");
    extractor = cv::DescriptorExtractor::create(featureExtractor);
    if(extractor.empty())
    {
      outError("creation failed!");
      return UIMA_ERR_ANNOTATOR_MISSING_INIT;
    }

#if OUT_LEVEL == OUT_LEVEL_DEBUG
    printParams(extractor);
#endif
    setupAlgorithm(extractor);

    if(featureExtractor == "SIFT" || featureExtractor == "SURF")
    {
      featureType = "numerical";
    }
    else
    {
      featureType = "binary";
    }

    return UIMA_ERR_NONE;
  }
Example #20
    std::vector<bbox_t> tracking_flow(cv::Mat dst_mat, bool check_error = true)
    {
        if (sync_PyrLKOpticalFlow_gpu.empty()) {
            std::cout << "sync_PyrLKOpticalFlow_gpu isn't initialized \n";
            return cur_bbox_vec;
        }

        int const old_gpu_id = cv::cuda::getDevice();
        if(old_gpu_id != gpu_id)
            cv::cuda::setDevice(gpu_id);

        if (dst_mat_gpu.cols == 0) {
            dst_mat_gpu = cv::cuda::GpuMat(dst_mat.size(), dst_mat.type());
            dst_grey_gpu = cv::cuda::GpuMat(dst_mat.size(), CV_8UC1);
        }

        //dst_grey_gpu.upload(dst_mat, stream);    // use BGR
        dst_mat_gpu.upload(dst_mat, stream);
        cv::cuda::cvtColor(dst_mat_gpu, dst_grey_gpu, CV_BGR2GRAY, 1, stream);

        if (src_grey_gpu.rows != dst_grey_gpu.rows || src_grey_gpu.cols != dst_grey_gpu.cols) {
            stream.waitForCompletion();
            src_grey_gpu = dst_grey_gpu.clone();
            cv::cuda::setDevice(old_gpu_id);
            return cur_bbox_vec;
        }

        ////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);    // OpenCV 2.4.x
        sync_PyrLKOpticalFlow_gpu->calc(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, err_gpu, stream);    // OpenCV 3.x

        cv::Mat cur_pts_flow_cpu;
        cur_pts_flow_gpu.download(cur_pts_flow_cpu, stream);

        dst_grey_gpu.copyTo(src_grey_gpu, stream);

        cv::Mat err_cpu, status_cpu;
        err_gpu.download(err_cpu, stream);
        status_gpu.download(status_cpu, stream);

        stream.waitForCompletion();

        std::vector<bbox_t> result_bbox_vec;

        if (err_cpu.cols == cur_bbox_vec.size() && status_cpu.cols == cur_bbox_vec.size())
        {
            for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
            {
                cv::Point2f cur_key_pt = cur_pts_flow_cpu.at<cv::Point2f>(0, i);
                cv::Point2f prev_key_pt = prev_pts_flow_cpu.at<cv::Point2f>(0, i);

                float moved_x = cur_key_pt.x - prev_key_pt.x;
                float moved_y = cur_key_pt.y - prev_key_pt.y;

                if (abs(moved_x) < 100 && abs(moved_y) < 100 && good_bbox_vec_flags[i])
                    if (err_cpu.at<float>(0, i) < flow_error && status_cpu.at<unsigned char>(0, i) != 0 &&
                        ((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
                    {
                        cur_bbox_vec[i].x += moved_x + 0.5;
                        cur_bbox_vec[i].y += moved_y + 0.5;
                        result_bbox_vec.push_back(cur_bbox_vec[i]);
                    }
                    else good_bbox_vec_flags[i] = false;
                else good_bbox_vec_flags[i] = false;

                //if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
            }
        }

        cur_pts_flow_gpu.swap(prev_pts_flow_gpu);
        cur_pts_flow_cpu.copyTo(prev_pts_flow_cpu);

        if (old_gpu_id != gpu_id)
            cv::cuda::setDevice(old_gpu_id);

        return result_bbox_vec;
    }
Example #21
void CHumanTracker::detectAndTrackFace()
{
    static ros::Time probe;

	// Do ROI
	debugFrame = rawFrame.clone();
	Mat img =  this->rawFrame(searchROI);

	faces.clear();
	ostringstream txtstr;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray;
    Mat frame( cvRound(img.rows), cvRound(img.cols), CV_8UC1 );
    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, frame, frame.size(), 0, 0, INTER_LINEAR );
    //equalizeHist( frame, frame );

	// This if for internal usage
    const ros::Time _n = ros::Time::now();
	double dt = (_n - probe).toSec();
	probe = _n;


	CvMat _image = frame;

    if (!storage.empty())
    {
        cvClearMemStorage(storage);
    }
    CvSeq* _objects = cvHaarDetectObjects(&_image, cascade, storage,
			1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);

	vector<CvAvgComp> vecAvgComp;
	Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);

	// End of using C API

	isFaceInCurrentFrame = (vecAvgComp.size() > 0);

    // This is a hack
    bool isProfileFace = false;
    if ((profileHackEnabled) && (!isFaceInCurrentFrame) && ((trackingState == STATE_REJECT) || (trackingState == STATE_REJECT)))
    {
        ROS_DEBUG("Using Profile Face hack ...");

        if (!storageProfile.empty()) {
            cvClearMemStorage(storageProfile);
        }
        CvSeq* _objectsProfile = cvHaarDetectObjects(&_image, cascadeProfile, storageProfile,
                1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);
        vecAvgComp.clear();
        Seq<CvAvgComp>(_objectsProfile).copyTo(vecAvgComp);
        isFaceInCurrentFrame = (vecAvgComp.size() > 0);
        if (isFaceInCurrentFrame)
        {
            ROS_DEBUG("The hack seems to work!");
        }
        isProfileFace = true;
    }

	if (trackingState == STATE_LOST)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
			trackingState = STATE_DETECT;
		}
	}

	if (trackingState == STATE_DETECT)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_LOST;
		}

		if (stateCounter > minDetectFrames)
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

	}

	if (trackingState == STATE_TRACK)
	{
		if (!isFaceInCurrentFrame)
		{
			trackingState = STATE_REJECT;
		}
	}

	if (trackingState == STATE_REJECT)
	{
		float covNorm = sqrt(
                        pow(KFTracker.errorCovPost.at<float>(0,0), 2) +
                        pow(KFTracker.errorCovPost.at<float>(1,1), 2)
						);

		if (!isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

		if ((stateCounter > minRejectFrames) && (covNorm > maxRejectCov))
		{
			trackingState = STATE_LOST;
			stateCounter = 0;
			resetKalmanFilter();
            reset();
		}
	}

	if ((trackingState == STATE_TRACK) || (trackingState == STATE_REJECT))
	{
		bool updateFaceHist = false;
		// This is important:

        KFTracker.transitionMatrix.at<float>(0,2) = dt;
        KFTracker.transitionMatrix.at<float>(1,3) = dt;

        Mat pred = KFTracker.predict();

		if (isFaceInCurrentFrame)
		{
            //std::cout << vecAvgComp.size() << " detections in image " << std::endl;
			float minCovNorm = 1e24;
			int i = 0;
			for( vector<CvAvgComp>::const_iterator rr = vecAvgComp.begin(); rr != vecAvgComp.end(); rr++, i++ )
			{
				copyKalman(KFTracker, MLSearch);
				CvRect r = rr->rect;
				r.x += searchROI.x;
				r.y += searchROI.y;
				double nr = rr->neighbors;
				Point center;
				Scalar color = colors[i%8];

				float normFaceScore = 1.0 - (nr / 40.0);
				if (normFaceScore > 1.0) normFaceScore = 1.0;
				if (normFaceScore < 0.0) normFaceScore = 0.0;
                setIdentity(MLSearch.measurementNoiseCov, Scalar_<float>::all(normFaceScore));

				center.x = cvRound(r.x + r.width*0.5);
				center.y = cvRound(r.y + r.height*0.5);

				measurement.at<float>(0) = r.x;
				measurement.at<float>(1) = r.y;
				measurement.at<float>(2) = r.width;
				measurement.at<float>(3) = r.height;

                MLSearch.correct(measurement);

				float covNorm = sqrt(
                    pow(MLSearch.errorCovPost.at<float>(0,0), 2) +
                    pow(MLSearch.errorCovPost.at<float>(1,1), 2)
				);

				if (covNorm < minCovNorm)
				{
					minCovNorm = covNorm;
					MLFace = *rr;
				}

//                if ((debugLevel & 0x02) == 0x02)
//                {
                    rectangle(debugFrame, center - Point(r.width*0.5, r.height*0.5), center + Point(r.width*0.5, r.height * 0.5), color);

                    txtstr.str("");
                    txtstr << "   Sc:" << rr->neighbors << " S:" << r.width << "x" << r.height;

                    putText(debugFrame, txtstr.str(), center, FONT_HERSHEY_PLAIN, 1, color);
//                }
			}

			// TODO: I'll fix this shit
            Rect r(MLFace.rect);
			r.x += searchROI.x;
			r.y += searchROI.y;
			faces.push_back(r);
			double nr = MLFace.neighbors;
			faceScore = nr;
            if (isProfileFace) faceScore = 0.0;
			float normFaceScore = 1.0 - (nr / 40.0);
			if (normFaceScore > 1.0) normFaceScore = 1.0;
			if (normFaceScore < 0.0) normFaceScore = 0.0;
            setIdentity(KFTracker.measurementNoiseCov, Scalar_<float>::all(normFaceScore));
			measurement.at<float>(0) = r.x;
			measurement.at<float>(1) = r.y;
			measurement.at<float>(2) = r.width;
			measurement.at<float>(3) = r.height;
            KFTracker.correct(measurement);

            // We see a face
			updateFaceHist = true;
		}
		else
		{
            KFTracker.statePost = KFTracker.statePre;
            KFTracker.errorCovPost = KFTracker.errorCovPre;
		}

        // TODO: MOVE THIS
        for (unsigned int k = 0; k < faces.size(); k++) {
            rectangle(debugFrame, faces.at(k), CV_RGB(128, 128, 128));
        }

        beleif.x = max<int>(KFTracker.statePost.at<float>(0), 0);
        beleif.y = max<int>(KFTracker.statePost.at<float>(1), 0);
        beleif.width = min<int>(KFTracker.statePost.at<float>(4), iWidth - beleif.x);
        beleif.height = min<int>(KFTracker.statePost.at<float>(5), iHeight - beleif.y);

		Point belCenter;
		belCenter.x = beleif.x + (beleif.width * 0.5);
		belCenter.y = beleif.y + (beleif.height * 0.5);

        if ((debugLevel & 0x02) == 0x02)
        {
            txtstr.str("");
//            txtstr << "P:" << std::setprecision(3) << faceUncPos << " S:" << beleif.width << "x" << beleif.height;
//            putText(debugFrame, txtstr.str(), belCenter + Point(0, 50), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,0,0));

//            circle(debugFrame, belCenter, belRad, CV_RGB(255,0,0));
//            circle(debugFrame, belCenter, (belRad - faceUncPos < 0) ? 0 : (belRad - faceUncPos), CV_RGB(255,255,0));
//            circle(debugFrame, belCenter, belRad + faceUncPos, CV_RGB(255,0,255));
        }

        //searchROI.x = max<int>(belCenter.x - KFTracker.statePost.at<float>(4) * 2, 0);
        //searchROI.y = max<int>(belCenter.y - KFTracker.statePost.at<float>(5) * 2, 0);
        //int x2 = min<int>(belCenter.x + KFTracker.statePost.at<float>(4) * 2, iWidth);
        //int y2 = min<int>(belCenter.y + KFTracker.statePost.at<float>(4) * 2, iHeight);
		//searchROI.width = x2 - searchROI.x;
		//searchROI.height = y2 - searchROI.y;


		if ((updateFaceHist) && (skinEnabled))
		{
            //updateFaceHist is true when we see a real face (not all the times)
            Rect samplingWindow;
//            samplingWindow.x = beleif.x + (0.25 * beleif.width);
//            samplingWindow.y = beleif.y + (0.1 * beleif.height);
//            samplingWindow.width = beleif.width * 0.5;
//            samplingWindow.height = beleif.height * 0.9;
            samplingWindow.x = measurement.at<float>(0) + (0.25 * measurement.at<float>(2));
            samplingWindow.y = measurement.at<float>(1) + (0.10 * measurement.at<float>(3));
            samplingWindow.width = measurement.at<float>(2) * 0.5;
            samplingWindow.height = measurement.at<float>(3) * 0.9;
            if ((debugLevel & 0x04) == 0x04)
            {
                rectangle(debugFrame, samplingWindow, CV_RGB(255,0,0));
            }
			Mat _face = rawFrame(samplingWindow);
			generateRegionHistogram(_face, faceHist);
		}


	}


//    if ((debugLevel & 0x02) == 0x02)
//    {
//        rectangle(debugFrame, searchROI, CV_RGB(0,0,0));
//        txtstr.str("");
//        txtstr << strStates[trackingState] << "(" << std::setprecision(3) << (dt * 1e3) << "ms )";
//        putText(debugFrame, txtstr.str() , Point(30,300), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,255,255));
//    }

//	dt =  ((double) getTickCount() - t) / ((double) getTickFrequency()); // In Seconds
}