Example No. 1
PowertfDialog::PowertfDialog(cv::Mat& img, QWidget *parent)
    : QDialog(parent)
{
    r = 1;
    c = 1;
    b = 0;
    img.copyTo(image);
    pimage = &img;

    rSlider = new QSlider(Qt::Horizontal);
    rSlider->setRange(0,10);
    rSlider->setValue(r);
    cSlider = new QSlider(Qt::Horizontal);
    cSlider->setRange(0,10);
    cSlider->setValue(c);
    bSlider = new QSlider(Qt::Horizontal);
    bSlider->setRange(0,10);
    bSlider->setValue(b);

    rSBx = new QSpinBox();
    rSBx->setRange(0,10);
    rSBx->setValue(r);
    cSBx = new QSpinBox();
    cSBx->setRange(0,10);
    cSBx->setValue(c);
    bSBx = new QSpinBox();
    bSBx->setRange(0,10);
    bSBx->setValue(b);

    connect(rSlider,SIGNAL(valueChanged(int)),this,SLOT(rChanged(int)));
    connect(cSlider,SIGNAL(valueChanged(int)),this,SLOT(cChanged(int)));
    connect(bSlider,SIGNAL(valueChanged(int)),this,SLOT(bChanged(int)));
    connect(rSlider,SIGNAL(valueChanged(int)),rSBx,SLOT(setValue(int)));
    connect(cSlider,SIGNAL(valueChanged(int)),cSBx,SLOT(setValue(int)));
    connect(bSlider,SIGNAL(valueChanged(int)),bSBx,SLOT(setValue(int)));

    connect(rSBx,SIGNAL(valueChanged(int)),this,SLOT(rChanged(int)));
    connect(cSBx,SIGNAL(valueChanged(int)),this,SLOT(cChanged(int)));
    connect(bSBx,SIGNAL(valueChanged(int)),this,SLOT(bChanged(int)));
    connect(rSBx,SIGNAL(valueChanged(int)),rSlider,SLOT(setValue(int)));
    connect(cSBx,SIGNAL(valueChanged(int)),cSlider,SLOT(setValue(int)));
    connect(bSBx,SIGNAL(valueChanged(int)),bSlider,SLOT(setValue(int)));

    rLabel = new QLabel(tr("r"));
    cLabel = new QLabel(tr("c"));
    bLabel = new QLabel(tr("b"));

    okButton = new QPushButton(tr("&OK"));
    okButton->setDefault(true);
    okButton->setEnabled(true);
    connect(okButton, SIGNAL(clicked()), this, SLOT(okClicked()));

    closeButton = new QPushButton(tr("&Close"));
    connect(closeButton, SIGNAL(clicked()), this, SLOT(closePowertf()));

    QHBoxLayout *rLayout = new QHBoxLayout;
    rLayout->addWidget(rLabel);
    rLayout->addWidget(rSlider);
    rLayout->addWidget(rSBx);
    QHBoxLayout *cLayout = new QHBoxLayout;
    cLayout->addWidget(cLabel);
    cLayout->addWidget(cSlider);
    cLayout->addWidget(cSBx);
    QHBoxLayout *bLayout = new QHBoxLayout;
    bLayout->addWidget(bLabel);
    bLayout->addWidget(bSlider);
    bLayout->addWidget(bSBx);

    QVBoxLayout *leftLayout = new QVBoxLayout;
    leftLayout->addLayout(rLayout);
    leftLayout->addLayout(cLayout);
    leftLayout->addLayout(bLayout);
    
    QVBoxLayout *rightLayout = new QVBoxLayout;
    rightLayout->addWidget(okButton);
    rightLayout->addWidget(closeButton);
    rightLayout->addStretch();

    QHBoxLayout *mainLayout = new QHBoxLayout;
    mainLayout->addLayout(leftLayout);
    mainLayout->addLayout(rightLayout);
    setLayout(mainLayout);

    setWindowTitle(tr("Power Transform"));
    setFixedHeight(sizeHint().height());
}
Example No. 2
// General processing function of the UV-disparity based pipeline
cv::Mat UVDisparity::Process(cv::Mat& img_L, cv::Mat& disp_sgbm,
                             VisualOdometryStereo& vo, cv::Mat& xyz,
                             cv::Mat& roi_mask, cv::Mat& ground_mask,
                             double& pitch1, double& pitch2)
{
    cv::Mat mask_moving;
    calVDisparity(disp_sgbm,xyz);

    //sequentially estimate pitch angles by Kalman Filter
    vector<cv::Mat> pitch_measures;

    pitch_measures = Pitch_Classify(xyz,ground_mask);
    pitch1_KF->predict();
    pitch1_KF->correct(pitch_measures[0]);

    pitch2_KF->predict();
    pitch2_KF->correct(pitch_measures[1]);

    pitch1 = pitch_measures[0].at<float>(0);
    pitch2 = pitch_measures[1].at<float>(0);


    //Improve 3D reconstruction results by pitch angles
    correct3DPoints(xyz,roi_,pitch1_KF->statePost.at<float>(0),pitch2_KF->statePost.at<float>(0));

    //set image ROI according to ROI3D (ROI within a 3D space)
    setImageROI(xyz, roi_mask);

    //filter inliers and outliers
    filterInOut(img_L,roi_mask,disp_sgbm,vo,pitch1);

    //calculate Udisparity image
    calUDisparity(disp_sgbm,xyz,roi_mask,ground_mask);

    //using sigmoid function to adjust Udisparity image for segmentation
    double scale = 0.02, range = 32;
    adjustUdisIntense(scale,range);

    //Find all possible segmentation
    findAllMasks(vo,img_L,xyz,roi_mask);

    if(masks_.size()>0)
    {
        //merge overlapped masks
        mergeMasks();

        //improve the segments by inliers
        verifyByInliers(vo,img_L);
    }

    //perform segmentation in disparity image
    segmentation(disp_sgbm,img_L,roi_mask,mask_moving);

    //demonstration
    cv::Mat img_show;
    img_L.copyTo(img_show,mask_moving);
    cv::imshow("moving",img_show);
    cv::waitKey(1);

    masks_.clear();
    return mask_moving;
}
Example No. 3
static void dominantTransforms(const cv::Mat &img, std::vector <cv::Matx33f> &transforms,
                               const int nTransform, const int psize)
{
    /** Walsh-Hadamard Transformation **/

    std::vector <cv::Mat> channels;
    cv::split(img, channels);

    int cncase = std::max(img.channels() - 2, 0);
    const int np[] = {cncase == 0 ? 12 : (cncase == 1 ? 16 : 10),
                      cncase == 0 ? 12 : (cncase == 1 ? 04 : 02),
                      cncase == 0 ? 00 : (cncase == 1 ? 04 : 02),
                      cncase == 0 ? 00 : (cncase == 1 ? 00 : 10)};

    for (int i = 0; i < img.channels(); ++i)
        rgb2whs(channels[i], channels[i], np[i], psize);

    cv::Mat whs; // Walsh-Hadamard series
    cv::merge(channels, whs);

    KDTree <float, 24> kdTree(whs, 16, 32);
    std::vector <int> annf( whs.total(), 0 );

    /** Propagation-assisted kd-tree search **/

    for (int i = 0; i < whs.rows; ++i)
        for (int j = 0; j < whs.cols; ++j)
        {
            double dist = std::numeric_limits <double>::max();
            int current = i*whs.cols + j;

            int dy[] = {0, 1, 0}, dx[] = {0, 0, 1};
            for (int k = 0; k < int( sizeof(dy)/sizeof(int) ); ++k)
                if (i - dy[k] >= 0 && j - dx[k] >= 0)
                {
                    int neighbor = (i - dy[k])*whs.cols + (j - dx[k]);
                    int leafIdx = k == 0 ? neighbor :
                        annf[neighbor] + dy[k]*whs.cols + dx[k];
                    kdTree.updateDist(leafIdx, current,
                               annf[i*whs.cols + j], dist);
                }
        }

    /** Local maxima extraction **/

    cv::Mat_<double> annfHist(2*whs.rows - 1, 2*whs.cols - 1, 0.0),
                    _annfHist(2*whs.rows - 1, 2*whs.cols - 1, 0.0);
    for (size_t i = 0; i < annf.size(); ++i)
        ++annfHist( (annf[i] - int(i))/whs.cols + whs.rows - 1,
                    (annf[i] - int(i))%whs.cols + whs.cols - 1 );

    cv::GaussianBlur( annfHist, annfHist,
        cv::Size(0, 0), std::sqrt(2.0), 0.0, cv::BORDER_CONSTANT);
    cv::dilate( annfHist, _annfHist,
        cv::Matx<uchar, 9, 9>::ones() );

    std::vector < std::pair<double, int> > amount;
    std::vector <cv::Point2i> shiftM;

    for (int i = 0, t = 0; i < annfHist.rows; ++i)
    {
        double  *pAnnfHist =  annfHist.ptr<double>(i);
        double *_pAnnfHist = _annfHist.ptr<double>(i);

        for (int j = 0; j < annfHist.cols; ++j)
            if ( pAnnfHist[j] != 0 && pAnnfHist[j] == _pAnnfHist[j] )
            {
                amount.push_back( std::make_pair(pAnnfHist[j], t++) );
                shiftM.push_back( cv::Point2i(j - whs.cols + 1,
                                              i - whs.rows + 1) );
            }
    }

    std::partial_sort( amount.begin(), amount.begin() + nTransform,
        amount.end(), std::greater< std::pair<double, int> >() );

    transforms.resize(nTransform);
    for (int i = 0; i < nTransform; ++i)
    {
        int idx = amount[i].second;
        transforms[i] = cv::Matx33f(1, 0, float(shiftM[idx].x),
                                    0, 1, float(shiftM[idx].y),
                                    0, 0,          1          );
    }
}
Example No. 4
/*
 * objective : get the gray level map of the input image and rescale it to the range [0-255]
 */
 static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
 {

     // adjust output matrix wrt the input size but single channel
     std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
     //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
     //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;

     // rescale between 0-255, keeping floating point values
     cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);

     // extract a 8bit image that will be used for histogram edge cut
     cv::Mat intGrayImage;
     if (inputMat.channels()==1)
     {
         outputMat.convertTo(intGrayImage, CV_8U);
     }else
     {
         cv::Mat rgbIntImg;
         outputMat.convertTo(rgbIntImg, CV_8UC3);
         cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
     }

     // get the histogram density probability in order to cut values below/above the edge limits (here 5-95%)... useful for cancelling HDR pixel errors
     cv::Mat dst, hist;
     int histSize = 256;
     calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
     cv::Mat normalizedHist;
     normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1

     double min_val, max_val;
     minMaxLoc(normalizedHist, &min_val, &max_val);
     //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;

     // compute density probability
     cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
     denseProb.at<float>(0)=normalizedHist.at<float>(0);
     int histLowerLimit=0, histUpperLimit=0;
     for (int i=1;i<normalizedHist.size().height;++i)
     {
         denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
         //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
         if ( denseProb.at<float>(i)<histogramClippingLimit)
             histLowerLimit=i;
         if ( denseProb.at<float>(i)<1-histogramClippingLimit)
             histUpperLimit=i;
     }
     // deduce min and max admitted gray levels
     float minInputValue = (float)histLowerLimit/histSize*255;
     float maxInputValue = (float)histUpperLimit/histSize*255;

     std::cout<<"=> Histogram limits "
             <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
             <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
             <<std::endl;
     //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
     drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);

     // rescale image range [minInputValue-maxInputValue] to [0-255]
     outputMat-=minInputValue;
     outputMat*=255.0/(maxInputValue-minInputValue);
     // cut original histogram and back project to original image
     cv::threshold( outputMat, outputMat, 255.0, 255.0, cv::THRESH_TRUNC ); // clip values above 255
     cv::threshold( outputMat, outputMat, 0.0, 0.0, cv::THRESH_TOZERO ); // clip values below 0

 }
Example No. 5
bool CaffeFeatExtractor<Dtype>::extract_singleFeat_1D(cv::Mat &image, vector<Dtype> &features, float (&times)[2])
{

    times[0] = 0.0f;
    times[1] = 0.0f;

    // Check input image
    if (image.empty())
    {
        std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): empty imMat!" << std::endl;
        return false;
    }

    // Start timing
    cudaEvent_t startPrep, stopPrep, startNet, stopNet;
    if (timing)
    {
        cudaEventCreate(&startPrep);
        cudaEventCreate(&startNet);
        cudaEventCreate(&stopPrep);
        cudaEventCreate(&stopNet);
        cudaEventRecord(startPrep, NULL);
        cudaEventRecord(startNet, NULL);
    }

    // Prepare Caffe

    // Set the GPU/CPU mode for Caffe (here in order to be thread-safe)
    if (gpu_mode)
    {
        Caffe::set_mode(Caffe::GPU);
        Caffe::SetDevice(device_id);
    }
    else
    {
        Caffe::set_mode(Caffe::CPU);
    }

    // Initialize labels to zero
    int label = 0;

    // Get pointer to data layer to set the input
    caffe::shared_ptr<MemoryDataLayer<Dtype> > memory_data_layer = boost::dynamic_pointer_cast<caffe::MemoryDataLayer<Dtype> >(feature_extraction_net->layers()[0]);

    // Set batch size to 1
    if (memory_data_layer->batch_size()!=1)
    {
        memory_data_layer->set_batch_size(1);
        std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): BATCH SIZE = " << memory_data_layer->batch_size() << std::endl;
    }

    // Image preprocessing
    // The image passed to AddMatVector must be the same size as the mean image.
    // If not, it is resized: Lanczos interpolation for downsampling,
    // bilinear for upsampling.
    if (image.rows != mean_height || image.cols != mean_width)
    {
        if (image.rows > mean_height || image.cols > mean_width)
        {
            cv::resize(image, image, cv::Size(mean_width, mean_height), 0, 0, CV_INTER_LANCZOS4);
        }
        else
        {
            cv::resize(image, image, cv::Size(mean_width, mean_height), 0, 0, CV_INTER_LINEAR);
        }
    }

    memory_data_layer->AddMatVector(vector<cv::Mat>(1, image),vector<int>(1,label));

    size_t num_features = blob_names.size();
    if(num_features!=1)
    {
        std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): Error! The list of features to be extracted does not have size one!" << std::endl;
        return false;
    }

    if (timing)
    {
        // Record the stop event
        cudaEventRecord(stopPrep, NULL);

        // Wait for the stop event to complete
        cudaEventSynchronize(stopPrep);

        cudaEventElapsedTime(times, startPrep, stopPrep);
    }


    // Run network and retrieve features!

    // depending on your net's architecture, the blobs will hold accuracy and/or labels, etc
    std::vector<Blob<Dtype>*> results = feature_extraction_net->Forward();

    const caffe::shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net->blob_by_name(blob_names[0]);

    int batch_size = feature_blob->num(); // should be 1
    if (batch_size!=1)
    {
        std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): Error! Retrieved more than one feature, exiting..." << std::endl;
        return false;
    }

    int feat_dim = feature_blob->count(); // should be equal to: count/num=channels*width*height
    if (feat_dim!=feature_blob->channels())
    {
        std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): Attention! The feature is not 1D: unrolling according to Caffe's order (i.e. channel, height, width)" << std::endl;
    }

    features.insert(features.end(), feature_blob->mutable_cpu_data() + feature_blob->offset(0), feature_blob->mutable_cpu_data() + feature_blob->offset(0) + feat_dim);

    if (timing)
    {
        // Record the stop event
        cudaEventRecord(stopNet, NULL);

        // Wait for the stop event to complete
        cudaEventSynchronize(stopNet);

        cudaEventElapsedTime(times+1, startNet, stopNet);

    }

    return true;

}
Example No. 6
File: hist.cpp Project: sensq/home
/**
@brief Computes the histogram of the hue channel only
@param src Image to compute the histogram from
*/
void HIST::calcHistgramHue(cv::Mat &src)
{
	if(src.data == NULL)
		return;
	
	// distance between data points on the graph
	stepH = (double)ui.histgramH->width()/180;

	cv::cvtColor(src, src, cv::COLOR_BGR2HSV);
	int count[180];
	int max = 0;

	// initialization
	for (int i = 0; i < 180; i++)
		count[i] = 0;

	// count the occurrences of each hue value
	for (int j = 0; j < src.cols; j++)
		for (int i = 0; i < src.rows; i++)
			// ignore nearly white pixels (saturation ≈ 0)
			if(src.data[i * src.step + j * src.elemSize() + 1] > 3)
				count[src.data[i * src.step + j * src.elemSize()]]++;
	cv::cvtColor(src, src, cv::COLOR_HSV2BGR);

	// get the scaling constant (the largest count)
	for (int i = 0; i < 180; i++)
		if(max < count[i])
			max = count[i];

	// scaling
	double histgram[180];
	for (int i = 0; i < 180; i++)
		histgram[i] = (double)count[i] / max * 200;


	/** Build a simple graph **/
	int gWidth = 180 * stepH;
	int gHeight = 200;
	// grid image
	cv::Mat baseGraph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
	// hue histogram image
	cv::Mat hueGraph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
	// final image to draw: the two above multiply-blended
	cv::Mat graph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));

	cv::cvtColor(hueGraph, hueGraph, cv::COLOR_BGR2HSV);
	for (int j = 0; j < hueGraph.rows; j++){
		for (int i = 0; i < hueGraph.cols; i++){
			hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 0] = (int)((double)i/stepH);
			hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 1] = 220;
			hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 2] = 180;
		}
	}
	cv::cvtColor(hueGraph, hueGraph, cv::COLOR_HSV2BGR);
	// horizontal scale lines
	for (int i = 0; i < 20; i++)
		if(!(i%4))
			cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(180, 180, 180), 2);
		else
			cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(200, 200, 200), 1);
	// hue graph
	for (int i = 0; i < 180; i++)
		cv::line(hueGraph, cv::Point((int)(i*stepH), 0), cv::Point((int)(i*stepH), (int)histgram[i]), cv::Scalar(180, 180, 180), 2);
	// polyline (stop at 178 so that histgram[i+1] stays within bounds)
	for (int i = 0; i < 179; i++)
		cv::line(hueGraph, cv::Point((int)(i*stepH), (int)histgram[i]), cv::Point((int)((i+1)*stepH), (int)histgram[i+1]), cv::Scalar(90, 90, 90), 2, CV_AA);
	// composite
	blend(baseGraph, hueGraph, graph, blendType::MULTI);
	// flip vertically
	cv::flip(graph, graph, 0);

	drawForQtLabel(graph, ui.histgramH, false);
}
Example No. 7
bool ParallaxErrorAnalysis_GradientPatch(const cv::Mat& img1,const shape& img1_shp, const cv::Mat& img2, const shape& img2_shp, const cv::Mat& img2_original,
        const cv::Mat& H, const cv::Size2i& img_size ,double& res_error)
{
    vector<cv::Point2f> overlap_points;


    for(int r = 0 ; r < img_size.height; r++)
    {
        for(int c = 0; c < img_size.width; c++)
        {
            shape t_img1_shape = img1_shp;
            shape t_img2_shape = img2_shp;
            cv::Point2f t_point(c, r);
            if(t_img1_shape.isInShape(c, r) && t_img2_shape.isInShape(c, r) )
            {
                // record these points
                overlap_points.push_back(t_point);
            }
        }
    }

    // find the corresponding points in img2_original
    cv::Mat Hn = H.inv();
    vector<cv::Point2f> correPoints_ori(overlap_points.size());
    cv::perspectiveTransform(overlap_points, correPoints_ori, Hn); // both arguments must be vector<cv::Point2f>, not Point2i

    // check whether any points fall out of bounds
    /*ofstream Dout("2.txt",ios::out);
    for(int i = 0; i < correPoints_ori.size(); i++)
    {
    	if(correPoints_ori[i].x < 0 || correPoints_ori[i].x > img2_original.cols
    		|| correPoints_ori[i].y < 0 || correPoints_ori[i].y > img2_original.rows)
    	{

    		Dout<< correPoints_ori[i].x << " "<<correPoints_ori[i].y << endl;
    	}
    }
    Dout.clear();
    Dout.close();*/

    // compute the error
    res_error = 0;
    cv::Mat img_blend;
    vector<cv::Mat> imgs;
    vector<shape> shapes;
    imgs.push_back(img1);
    imgs.push_back(img2);
    shapes.push_back(img1_shp);
    shapes.push_back(img2_shp);
    blending_all(imgs, shapes, img_size, img_blend);

    // compute the gradient images of Iij and Ij
    cv::Mat img_blend_G;
    cv::Mat img2_original_G;
    gradientGray(img_blend, img_blend_G);
    gradientGray(img2_original, img2_original_G);

    // test: find out exactly which points differ
    vector<cv::Point2f> err_points_img2;
    vector<uchar> err_img2;
    // compute the error from the gradient images
    double res = 0;
    for(int i = 0; i < overlap_points.size(); i++)
    {
        uchar Gray1 = img_blend_G.at<uchar>(overlap_points[i]);
        uchar Gray2 = img2_original_G.at<uchar>(correPoints_ori[i]);
        int tr = abs(Gray1 - Gray2);
        //if(tr != 0) // many pixels differ by only 1, which is essentially identical
        //{
        //	res += tr;
        //}
        if(tr >= 5) // many pixels differ by only 1, which is essentially identical
        {
            res += tr;
            err_points_img2.push_back(correPoints_ori[i]);
            err_img2.push_back(tr);
        }
    }

    res_error = res;

    // test image:
    // create a new black image the size of img2_original
    cv::Mat test_img = cv::Mat::zeros(img2_original.size(), CV_8UC3);
    for(int i = 0; i < err_points_img2.size(); i++)
    {
        test_img.at<Vec3b>(err_points_img2[i])[2] = err_img2[i];
    }
    imwrite("error_ori.jpg",img2_original);
    imwrite("error_test.jpg",test_img);
    //end test

    return true;
}
Example No. 8
//void CutoutImage::rotateMat (const cv::Mat srcMat ,cv::Mat &dstMat,const cv::Mat colorMat)
void CutoutImage::rotateMat (const cv::Mat srcMat ,cv::Mat &dstMat,const cv::Mat colorMat, const cv::Mat wholeImage, cv::Mat &wholeImageCut)
{
    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4f> lineVector;
    cv::Mat aMat = srcMat.clone();
    cv::findContours(aMat, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    cv::Mat showMat = cv::Mat(aMat.rows,aMat.cols,CV_8UC3,cv::Scalar(0,0,0));
    //dstMat = aMat.clone();
    dstMat = cv::Mat(srcMat.size(), CV_8UC4, cv::Scalar(0,0,0,0));
    std::vector<cv::Rect> nailRect;
    cv::Mat wholeImageRotate;
    for(int i = 0;i<(int)contours.size();i++)
    {
        if(contours[i].size() > 5)
        {
            cv::RotatedRect tmp = cv::minAreaRect(contours[i]);
            //ellipses.push_back(temp);
            cv::drawContours(showMat, contours, i, cv::Scalar(255,0,0), 1, 8);
            cv::ellipse(showMat, tmp, cv::Scalar(0,255,255), 2, 8);
            //cv::line(<#cv::Mat &img#>, <#Point pt1#>, <#Point pt2#>, <#const Scalar &color#>)
            cv::rectangle(showMat, tmp.boundingRect(), cv::Scalar(255,255,0),1,8);
            //imshow("Ellipses", showMat);
            float rotAngle = tmp.angle;
            tmp.angle = 0;
            //cv::circle(showMat,  cv::Point(tmp.boundingRect().x,tmp.boundingRect().y) , 2, cv::Scalar(0,0,255));
            if(tmp.boundingRect().width > tmp.boundingRect().height)
            {
                tmp.angle = 90;
                rotAngle = rotAngle - 90;
            }
            cv::rectangle(showMat, tmp.boundingRect(), cv::Scalar(255,255,0),1,8);
            nailRect.push_back(tmp.boundingRect());
            cv::ellipse(showMat, tmp, cv::Scalar(255,255,255), 2, 8);
            //imshow("Ellipses", showMat);
            //cv::Mat rotMat = cv::Mat(2,3,CV_32FC1);
            cv::Mat rotMat =   cv::getRotationMatrix2D(tmp.center,rotAngle, 1);
            //cv::transform(srcMat, dstMat, rotMat);
            cv::warpAffine(colorMat, dstMat, rotMat, cv::Size(std::max(srcMat.rows,srcMat.cols),std::max(srcMat.rows,srcMat.cols)), CV_INTER_NN);
            cv::warpAffine(wholeImage.clone(), wholeImageRotate, rotMat, cv::Size(std::max(srcMat.rows,srcMat.cols),std::max(srcMat.rows,srcMat.cols)), CV_INTER_LANCZOS4);
            //cv::imshow("RRRRRR", dstMat);
            
        }
        else
        {
            // cv::drawContours(showMat, contours, i, cv::Scalar(255,255,255), -1, 8);
            // imshow("Ellipses", showMat);
        }
    }
   
    cv::RNG rng; // random number generator
    int rows = dstMat.rows;
    int cols = dstMat.cols;
   // printf(" b  cutImageByRect rows = %d \n",rows);
   // printf(" b  cutImageByRect cols = %d \n",cols);
    
    int lx = cols;
    int rx = 0;
    int ty = rows;
    int by = 0;
    
    cv::Mat grayDst;
    cv::cvtColor(dstMat, grayDst, CV_BGRA2GRAY);
    
    for(int y = 0; y<rows; y++ ){
        uchar *grayDstRowsData = grayDst.ptr<uchar>(y);
        for(int x = 0; x<cols; x++ ){
            if(grayDstRowsData[x] != 0 )
            {
                if(x<lx){
                    lx = x;
                }
                if(x>rx){
                    rx = x;
                }
                if(y<ty){
                    ty = y;
                }
                if(y>by){
                    by = y;
                }
            }
        }
    }
    // expand the crop region a little
    if(lx - 10 >= 0)
        lx = lx - 10;
    if(rx + 10 <= cols - 1)
        rx = rx + 10;
    if(ty - 10 >= 0)
        ty = ty - 10;
    if(by + 10 <= rows - 1)
        by = by + 10;
    
    //cv::Point lt = cv::Point(lx,ty);
    //cv::Point rb = cv::Point(rx,by);
   
    cv::Rect cutRect;
  
    cutRect.x = lx;
    cutRect.y = ty;
    cutRect.width = rx - lx + 1;
    cutRect.height = by - ty + 1;
 
    std::cout<< cutRect <<std::endl;
    
    cv::Mat cutDst;
    cv::Mat wholeImageRotateBGRA;
    cutImageByRect(dstMat, cutRect, cutDst); // crop
    addAlphaChannle(wholeImageRotate, wholeImageRotateBGRA);
    cutImageByRect(wholeImageRotateBGRA, cutRect, wholeImageCut); // crop
    
    dstMat = cutDst.clone();
    //cv::rectangle(dstMat, lt, rb, cv::Scalar(0,0,255,0));
    //cv::imshow("RRRRRR", dstMat);
}
Example No. 9
/*
 Smooth the edges of the input binary image, cut the input color image out with a sharp edge, then blend the color image with the smooth-edged one.
 */
void CutoutImage::filterImageEdgeAndBlurMerge( const cv::Mat colorMat, const cv::Mat bitMat, cv::Mat &dstMat )
{
    cv::Mat aBitMat = bitMat.clone();
    cv::Mat aColorMat = colorMat.clone();
    cv::Mat filterMat;
    //CutoutImage::filterImage(aBitMat, filterMat); // make the pre-blur region larger than the normal region
    filterMat = aBitMat;  // make the pre-blur region the same size as the normal region
    
    std::cout<<"aColorMat channels =  " <<aColorMat.channels()<<std::endl;
    int blockSize = 5;
    int constValue = 10;
    //    cv::adaptiveThreshold( filterMat, filterMat, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, blockSize, constValue );
    cv::threshold(filterMat, filterMat, 1, 255, CV_THRESH_BINARY );
    //jiangbo test
    cv::Mat tmpMMat;
    //CutoutImage::smoothContours( filterMat, tmpMMat );
    
    int rows = aBitMat.rows;
    int cols = aBitMat.cols;
    // cut out the larger-coverage color image and blur it; mainly its edge data is needed
    cv::Mat cutBigColorMat = cv::Mat( rows , cols, CV_8UC3, cv::Scalar(255,255,255) );
    for (int y = 0; y < rows; y++) {
        uchar *filterCutMatRowData = filterMat.ptr<uchar>(y);
        uchar *colorMatRowData = aColorMat.ptr<uchar>(y);
        uchar *cutBigColorMatRowData = cutBigColorMat.ptr<uchar>(y);
        for (int x = 0; x < cols; x++) {
            if(filterCutMatRowData[x] != 0){
                cutBigColorMatRowData[x*3]      = colorMatRowData[x*3];
                cutBigColorMatRowData[x*3 + 1]  = colorMatRowData[x*3 + 1];
                cutBigColorMatRowData[x*3 + 2]  = colorMatRowData[x*3 + 2];
            }
        }
    }
    cv::Mat cutBigColorMatFilter;
    //CutoutImage::filterImage(cutBigColorMat, cutBigColorMatFilter);
    CutoutImage::filterImageForEdgeBlur(cutBigColorMat, cutBigColorMatFilter);
    
    
    cv::Mat bgrFilterMat;
    cv::cvtColor(filterMat, bgrFilterMat, CV_GRAY2BGR);
    cv::Mat smoothMask;
    CutoutImage::smoothContours(colorMat, bgrFilterMat, 21 , tmpMMat, smoothMask);
    
    
    cv::Mat fooRusultMat = cv::Mat( rows , cols, CV_8UC3, cv::Scalar(0,0,0) );
    cv::Mat testEdgeData= cv::Mat( rows , cols, CV_8UC3, cv::Scalar(0,0,0) );
    // blend
    for(int y = 0; y < rows; y++){
        uchar *aBitMatRowData = aBitMat.ptr<uchar>(y);
        uchar *colorMatRowData = aColorMat.ptr<uchar>(y);
        uchar *cutBigColorMatRowData = cutBigColorMatFilter.ptr<uchar>(y);
        uchar *fooRusultMatRowData = fooRusultMat.ptr<uchar>(y);
        uchar *testEdgeDataRowData = testEdgeData.ptr<uchar>(y);
        for(int x = 0; x < cols; x++){
            if(aBitMatRowData[x] != 0){
                fooRusultMatRowData[x*3]     = colorMatRowData[x*3];
                fooRusultMatRowData[x*3 + 1] = colorMatRowData[x*3 + 1];
                fooRusultMatRowData[x*3 + 2] = colorMatRowData[x*3 + 2];
            }
            
            if(cutBigColorMatRowData[x*3] != 255 || cutBigColorMatRowData[x*3 + 1] != 255 || cutBigColorMatRowData[x*3 + 2] != 255){
                if(aBitMatRowData[x] == 0){
                    fooRusultMatRowData[x*3]     = cutBigColorMatRowData[x*3];
                    fooRusultMatRowData[x*3 + 1] = cutBigColorMatRowData[x*3 + 1];
                    fooRusultMatRowData[x*3 + 2] = cutBigColorMatRowData[x*3 + 2];
                    testEdgeDataRowData[x*3]     = cutBigColorMatRowData[x*3];
                    testEdgeDataRowData[x*3 + 1] = cutBigColorMatRowData[x*3 + 1];
                    testEdgeDataRowData[x*3 + 2] = cutBigColorMatRowData[x*3 + 2];
                }
            }
        }
    }
}
Example No. 10
void CutoutImage::setColorImg(cv::Mat colorImg)
{
    _cloverGrabCut->setImage(colorImg);
    inputColorImageSize = colorImg.size();
}
Example No. 11
void CutoutImage::processImageCreatMask( std::vector<cv::Point> mouseSlideRegionDiscrete , const cv::Mat srcMat, cv::Mat &dstMat, int lineWidth, int expandWidth )
{
    cv::Mat showMat;// = srcMat.clone();
    cv::cvtColor(srcMat, showMat, CV_BGR2GRAY);
    cv::Mat showMergeColorImg = srcMat.clone();
    cv::Mat seedStoreMat = dstMat;   // seedStoreMat externally stores all seed points generated by this operation.
    mouseSlideRegion.clear();
    
    cv::Size matSize;
    matSize.width = showMat.cols;
    matSize.height = showMat.rows;
    CutoutImage::drawLineAndMakePointSet(mouseSlideRegionDiscrete,matSize,lineWidth,mouseSlideRegion);
    int lx = showMat.cols,rx = 0,ty = showMat.rows,by = 0;
    // compute the extent of the drawn line
    for(int i = 0;i<(int)mouseSlideRegion.size();i++)
    {
        //std::cout<<"point  = "<< mouseSlideRegion[i]<<std::endl;
        //cv::circle(showMatClone, mouseSlideRegion[i], 0.5, cv::Scalar(255)); // draw the points
        //leftmost x, rightmost x, topmost y, bottommost y
        if(mouseSlideRegion[i].x < lx)
        {
            lx = mouseSlideRegion[i].x;
        }
        if(mouseSlideRegion[i].x > rx)
        {
            rx = mouseSlideRegion[i].x;
        }
        if(mouseSlideRegion[i].y <ty )
        {
            ty = mouseSlideRegion[i].y;
        }
        if(mouseSlideRegion[i].y > by){
            by = mouseSlideRegion[i].y;
        }
        //CvPoint forePtsCvPoint =
    }
    std::cout<<" lx " << lx << " rx " << rx << " ty " << ty << " by " << by <<std::endl;
    std::cout<<" orgMat cols " <<showMat.cols<< "orgMat rows " << showMat.rows <<std::endl;
    
    if( lx - expandWidth >= 0 )
        lx = lx - expandWidth;
  
    if( rx + expandWidth <= showMat.cols - 1 )
        rx = rx + expandWidth;
   
    if( ty - expandWidth >= 0)
        ty = ty - expandWidth;
    
    if( by + expandWidth <= showMat.rows - 1 )
        by = by + expandWidth;

    std::cout<<" lx " << lx << " rx " << rx << " ty " << ty << " by " << by <<std::endl;
    
    cv::Point ltP = cv::Point(lx,ty);
    cv::Point rtP = cv::Point(rx,ty);
    cv::Point lbP = cv::Point(lx,by);
    cv::Point rbP = cv::Point(rx,by);
    // the region to crop
    int rectMatRow = by - ty + 1;
    int rectMatCol = rx - lx + 1;
    cv::Mat recMat = cv::Mat (rectMatRow,rectMatCol,CV_8UC1,cv::Scalar(0));
    //cv::rectangle(showMatClone, ltP, rbP, cv::Scalar(255),1); // draw the rectangle
    cv::Mat mouseSlideSeedStoreMat = cv::Mat(rectMatRow,rectMatCol,CV_8UC1,cv::Scalar(0));
    for(int y = 0;y<rectMatRow;y++){
        uchar *rectMatLineData = recMat.ptr<uchar>(y);
        uchar *orgMatLineData = showMat.ptr<uchar>(ty+y);
        uchar *msssMatLineData = mouseSlideSeedStoreMat.ptr<uchar>(y);
        uchar *ssMatLineData = seedStoreMat.ptr<uchar>(ty+y);
        for(int x = 0; x < rectMatCol; x++){
            rectMatLineData[x] = orgMatLineData[lx+x];
            msssMatLineData[x] = ssMatLineData[lx + x];
        }
    }
    //cutMat = recMat.clone();
    //cv::imshow("mouseSlideSeedStoreMat", mouseSlideSeedStoreMat);
    
    cv::Mat bitMat;
    int blockSize = 25;
    int constValue = 10;
    cv::adaptiveThreshold(recMat, bitMat, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, blockSize, constValue);
    
    cv::Mat filterImg;
    CutoutImage::filterImage(recMat,filterImg);
    cv::Mat nextImg = filterImg.clone();
    cv::Mat regionGrowMat;
    CutoutImage::rectRegionGrow( mouseSlideRegion, ltP, filterImg, mouseSlideSeedStoreMat ,regionGrowMat);
   // cv::imshow("regionGrowMat", regionGrowMat);
    cv::Mat mergeMat;
    CutoutImage::mergeProcess(regionGrowMat,mergeMat);
   // cv::imshow("mergeMat", mergeMat);
    CutoutImage::storeSeed(seedStoreMat,mergeMat,ltP); // seedStoreMat is the mask to be cut out
   // cv::imshow("seedStoreMat", seedStoreMat);
    //cv::imshow("showMat", showMatClone);
    cv::Mat colorMergeMat;
    CutoutImage::colorDispResultWithFullSeedMat(showMergeColorImg,seedStoreMat);
}
Example No. 12
	/// <summary>
	/// Calculates for each image patch the focus measure value. Result is a vector of Patches (see class Patch).
	/// If the image is binary, the relative foreground (foreground pixel / patchSize) and the weight is stored for each Patch.
	/// </summary>
	/// <param name="fm">The specified focus measure method fm (e.g. Brenner).</param>
	/// <param name="fmImg">The image fmImg to calculate the focus measure on. If empty, the src image is taken.</param>
	/// <param name="binary">If set to <c>true</c>, the input image is binary, specifying the foreground. The foreground area and the weight are saved to the image patch.</param>
	/// <returns>True if the focus measure could be computed, false otherwise.</returns>
	bool FocusEstimation::compute(FocusMeasure fm, cv::Mat fmImg, bool binary)
	{

		cv::Mat fImg = fmImg;
		if (fImg.empty())
			fImg = mSrcImg;

		if (fImg.empty())
			return false;

		if (fImg.channels() != 1 || fImg.depth() != CV_32F)
			return false;

		BasicFM fmClass;
		double f;
		mFmPatches.clear();

		for (int row = 0; row < fImg.rows; row += (mWindowSize+mSplitSize)) {
			for (int col = 0; col < fImg.cols; col += (mWindowSize+mSplitSize)) {

				cv::Range rR(row, cv::min(row + mWindowSize, fImg.rows));
				cv::Range cR(col, cv::min(col + mWindowSize, fImg.cols));

				cv::Mat tile = fImg(rR, cR);

				fmClass.setImg(tile);

				switch (fm)
				{
				case dsc::FocusEstimation::BREN:
					f = fmClass.computeBREN();
					break;
				case dsc::FocusEstimation::GLVA:
					f = fmClass.computeGLVA();
					break;
				case dsc::FocusEstimation::GLVN:
					f = fmClass.computeGLVN();
					break;
				case dsc::FocusEstimation::GLLV:
					f = fmClass.computeGLLV();
					break;
				case dsc::FocusEstimation::GRAT:
					f = fmClass.computeGRAT();
					break;
				case dsc::FocusEstimation::GRAS:
					f = fmClass.computeGRAS();
					break;
				case dsc::FocusEstimation::LAPE:
					f = fmClass.computeLAPE();
					break;
				case dsc::FocusEstimation::LAPV:
					f = fmClass.computeLAPV();
					break;
				case dsc::FocusEstimation::ROGR:
					f = fmClass.computeROGR();
					break;
				default:
					f = -1;
					break;
				}

				Patch r(cv::Point(col, row), mWindowSize, mWindowSize, f);

				if (binary) {
					cv::Scalar relArea = cv::sum(tile);
					r.setArea(relArea[0]);
					relArea[0] = relArea[0] / (double)(mWindowSize * mWindowSize);
					
					//area completely written with text ~ 0.1
					//normalize to 1
					relArea[0] *= 10.0;
					r.setWeight(relArea[0]);

					//weight with sigmoid function
					//-6: shift sigmoid to the right
					//*10: scale normalized Area
					//double a = 10.0;
					//double b = -6.0;
					//double weight = 1.0 / (1 + std::exp(-(relArea[0] * a + b)));
					//r.setWeight(weight);
				}


				mFmPatches.push_back(r);
			}
		}

		return true;
	}
Example No. 13
cv::Point EyeTracker::computePupilLocation(cv::Mat eye) {
    cv::Mat x_gradient = computeMaxGradient(eye);
    cv::Mat y_gradient = computeMaxGradient(eye.t()).t();
    cv::Mat magnitude = computeMagnitudes(x_gradient, y_gradient);
    double gradient_threshold =
        computeDynamicThreshold(magnitude, 50.0);
    resizeAndRender(magnitude, "sample_eye_gradient_mag");
    DEBUG("Gradient threshold: " << gradient_threshold);
    
    for (int y = 0; y < eye.rows; ++y) {
        double* x_row = x_gradient.ptr<double>(y);
        double* y_row = y_gradient.ptr<double>(y);
        const double* mag_row = magnitude.ptr<double>(y);
        for (int x = 0; x < eye.cols; ++x) {
            double gX = x_row[x];
            double gY = y_row[x];
            double m = mag_row[x];
            if (m > gradient_threshold) {
                x_row[x] = gX / m;
                y_row[x] = gY / m;
            } else {
                x_row[x] = 0;
                y_row[x] = 0;
            }
        }
    }
    
    resizeAndRender(x_gradient, "sample_eye_gradient_x");
    resizeAndRender(y_gradient, "sample_eye_gradient_y");
    
    cv::Mat weight;
    cv::GaussianBlur(eye, weight,
                     cv::Size(5, 5), 0, 0);
    for (int y = 0; y < weight.rows; ++y) {
        unsigned char* row = weight.ptr<unsigned char>(y);
        for (int x = 0; x < weight.cols; ++x) {
            row[x] = (255 - row[x]);
        }
    }
    
    resizeAndRender(weight, "sample_eye_weight");
    
    cv::Mat out_sum = cv::Mat::zeros(eye.rows, eye.cols, CV_64F);
    for (int y = 0; y < weight.rows; ++y) {
        const double* Xr = x_gradient.ptr<double>(y);
        const double* Yr = y_gradient.ptr<double>(y);
        for (int x = 0; x < weight.cols; ++x) {
            double gX = Xr[x];
            double gY = Yr[x];
            if (gX == 0.0 && gY == 0.0) {
                continue;
            }
            test_center(x, y, weight, gX, gY, out_sum);
        }
    }
    
    double gradients_num = weight.rows * weight.cols;
    cv::Mat out;
    out_sum.convertTo(out, CV_32F, 1.0 / gradients_num);
    
    cv::Point max_point;
    double max_value;
    cv::minMaxLoc(out, NULL, &max_value, NULL, &max_point);
    
    cv::Mat flood_clone;
    double flood_thresh = max_value * 0.97;
    cv::threshold(out, flood_clone, flood_thresh, 0.0f, cv::THRESH_TOZERO);
    cv::Mat mask = floodKillEdges(flood_clone);
    
    cv::minMaxLoc(out, NULL, &max_value, NULL, &max_point, mask);
    
    resizeAndRender(mask, "sample_eye_mask");
    resizeAndRender(out, "sample_eye_possible_centers");
    return max_point;
}
Example No. 14
void Depth::calcPointCloud(
    const cv::Mat& input_disparity, const cv::Mat& left_image,
    const double baseline, const double focal_length, const int cx,
    const int cy, pcl::PointCloud<pcl::PointXYZRGB>* pointcloud,
    pcl::PointCloud<pcl::PointXYZRGB>* freespace_pointcloud) {
  pointcloud->clear();
  freespace_pointcloud->clear();

  if (left_image.depth() != CV_8U) {
    ROS_ERROR(
        "Pointcloud generation is currently only supported on 8 bit images");
    return;
  }

  cv::Mat disparity_filled, input_valid;
  bulidFilledDisparityImage(input_disparity, &disparity_filled, &input_valid);

  int side_bound = sad_window_size_ / 2;

  // build pointcloud
  for (int y_pixels = side_bound; y_pixels < input_disparity.rows - side_bound;
       ++y_pixels) {
    for (int x_pixels = side_bound + min_disparity_ + num_disparities_;
         x_pixels < input_disparity.cols - side_bound; ++x_pixels) {
      const uint8_t& is_valid = input_valid.at<uint8_t>(y_pixels, x_pixels);
      const int16_t& input_value =
          input_disparity.at<int16_t>(y_pixels, x_pixels);
      const int16_t& filled_value =
          disparity_filled.at<int16_t>(y_pixels, x_pixels);

      bool freespace;
      double disparity_value;

      // if the filled disparity is valid it must be a freespace ray
      if (filled_value < std::numeric_limits<int16_t>::max()) {
        disparity_value = static_cast<double>(filled_value);
        freespace = true;
      }
      // else it is a normal ray
      else if (is_valid) {
        disparity_value = static_cast<double>(input_value);
        freespace = false;
      } else {
        continue;
      }

      pcl::PointXYZRGB point;

      // the 16* is needed as opencv stores disparity maps as 16 * the true
      // values
      point.z = (16 * focal_length * baseline) / disparity_value;
      point.x = point.z * (x_pixels - cx) / focal_length;
      point.y = point.z * (y_pixels - cy) / focal_length;

      if (left_image.channels() == 3) {
        const cv::Vec3b& color = left_image.at<cv::Vec3b>(y_pixels, x_pixels);
        point.b = color[0];
        point.g = color[1];
        point.r = color[2];
      } else if (left_image.channels() == 4) {
        const cv::Vec4b& color = left_image.at<cv::Vec4b>(y_pixels, x_pixels);
        point.b = color[0];
        point.g = color[1];
        point.r = color[2];
      } else {
        point.b = left_image.at<uint8_t>(y_pixels, x_pixels);
        point.g = point.b;
        point.r = point.b;
      }

      if (freespace) {
        freespace_pointcloud->push_back(point);
      } else {
        pointcloud->push_back(point);
      }
    }
  }
}
Example No. 15
bool QrExtractor::extract( const cv::Mat & img )
{
    if ( pd->debug )
        pd->orig = img.clone();

    cv::cvtColor( img, pd->gray, CV_RGB2GRAY );

    if ( pd->smoothSz > 0 )
    {
        // Median accepts only odd values.
        cv::medianBlur( pd->gray, pd->blurred, (pd->smoothSz & 1) ? pd->smoothSz: pd->smoothSz+1 );
        cv::blur( pd->blurred, pd->blurred, cv::Size( pd->smoothSz, pd->smoothSz ) );
    }
    else
        pd->blurred = pd->gray;

    if ( !(pd->tresholdWndSz & 1) )
        pd->tresholdWndSz |= 1;
    cv::adaptiveThreshold( pd->blurred, pd->blurred, 255,
                           cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY,
                           pd->tresholdWndSz, 0.0 );

    
    pd->blurredSaved = pd->blurred.clone();
    cv::findContours( pd->blurred, pd->contours, pd->hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    //if ( pd->debug )
    //{
    //    for( unsigned i = 0; i<pd->contours.size(); i++ )
    //    {
    //        // Random color.
    //        Scalar color = Scalar( pd->rng.uniform(0, 255), pd->rng.uniform(0,255), pd->rng.uniform(0,255) );
    //        // Draw contour function.
    //        drawContours( pd->orig, pd->contours, i, color, 1, 8, pd->hierarchy, 0, Point() );
    //    }
    //}

    // Look for patterns.
    // There should be 3 squares. And each is a "pyramid of 3 squares" in turn.
    const double s1_s2_min = 1.0; //4.0/3.0;
    const double s1_s2_max = 6.0; //6.0/3.0;
    const double s2_s3_min = 1.0; //5.0/3.0;
    const double s2_s3_max = 6.0; //2.0/3.0;
    vector<Point> & markers = pd->markers;
    markers.clear();
    for ( unsigned s3Ind=0; s3Ind<pd->hierarchy.size(); s3Ind++ )
    {
        const Vec4i s3 = pd->hierarchy[s3Ind];
        // Skip if there is no parent.
        if (s3[3] < 0)
            continue;
        const unsigned s2Ind = s3[3];
        const Vec4i s2 = pd->hierarchy[ s2Ind ];
        // Skip if there is no parent.
        if (s2[3] < 0)
            continue;
        const unsigned s1Ind = s2[3];
        const Vec4i s1 = pd->hierarchy[ s1Ind ];

        // Compare areas.
        const double s3Area = contourArea( pd->contours[ s3Ind ] );
        if ( s3Area < 1.0 )
            continue;
        const double s2Area = contourArea( pd->contours[ s2Ind ] );
        if ( s2Area < 1.0 )
            continue;
        const double ratio2_3 = s2Area/s3Area;
        if ( ( ratio2_3 < s2_s3_min ) || ( ratio2_3 > s2_s3_max ) )
            continue;

        const double s1Area = contourArea( pd->contours[ s1Ind ] );
        const double ratio1_2 = s1Area/s2Area;
        if ( ( ratio1_2 < s1_s2_min ) || ( ratio1_2 > s1_s2_max ) )
            continue;

        Moments mu;
        double x, y;

        mu = moments( pd->contours[ s1Ind ] );
        x = mu.m10 / mu.m00;
        y = mu.m01 / mu.m00;

        mu = moments( pd->contours[ s2Ind ] );
        x += mu.m10 / mu.m00;
        y += mu.m01 / mu.m00;

        mu = moments( pd->contours[ s3Ind ] );
        x += mu.m10 / mu.m00;
        y += mu.m01 / mu.m00;

        x *= 0.333333333;
        y *= 0.333333333;

        markers.push_back( Point( static_cast<int>(x), static_cast<int>(y) ) );
        if ( markers.size() > 2 )
            break;
    }

    // Sort markers.
    // First one should be middle one.
    // Second - one along X axis.
    // And the last one along (-Y) axis.
    if ( markers.size() > 2 )
    {
        int indO = PD::detectO( pd->blurredSaved, markers );
        const Point & o = markers[ indO ];
        int indX = (indO+1)%3;
        int indY = (indO+2)%3;
        const Point & x = markers[ indX ];
        const Point & y = markers[ indY ];
        bool leftRf = PD::detectXy( o, x, y );
        if ( !leftRf )
        {
            int t = indX;
            indX = indY;
            indY = t;
        }
        markers.push_back( markers[ indO ] );
        markers.push_back( markers[ indX ] );
        markers.push_back( markers[ indY ] );
        markers[0] = markers[3];
        markers[1] = markers[4];
        markers[2] = markers[5];
        markers.resize( 3 );
    }


    if ( pd->debug && ( markers.size() > 2 ) )
    {
        cv::line( pd->orig, markers[0], markers[1], Scalar( 0, 0, 255 ), 3 );
        cv::line( pd->orig, markers[0], markers[2], Scalar( 0, 255, 0 ), 3 );
        //cv::line( pd->orig, markers[2], markers[0], Scalar( 0, 0, 255 ), 3 );
    }
    if ( pd->debug )
    {
        cv::imshow( "QrExtractor::orig",     pd->orig );
        cv::imshow( "QrExtractor::filtered", pd->blurredSaved );
    }

    bool found = (markers.size() > 2);
    return found;
}
Example No. 16
/*
    Perform one thinning iteration (Zhang-Suen).
    param img  Binary image with values in the range {0, 1}
    param iter 0 for the even subiteration, 1 for the odd subiteration
 */
void thinningIteration(cv::Mat& img, int iter)
{
    CV_Assert(img.channels() == 1);
    CV_Assert(img.depth() == CV_8U);
    CV_Assert(img.rows > 3 && img.cols > 3);
    
    cv::Mat marker = cv::Mat::zeros(img.size(), CV_8UC1);
    
    int nRows = img.rows;
    int nCols = img.cols;
    
    if (img.isContinuous()) {
        nCols *= nRows;
        nRows = 1;
    }
    
    int x, y;
    uchar *pAbove;
    uchar *pCurr;
    uchar *pBelow;
    uchar *nw, *no, *ne;    // north (pAbove)
    uchar *we, *me, *ea;
    uchar *sw, *so, *se;    // south (pBelow)
    
    uchar *pDst;
    
    // initialize row pointers
    pAbove = NULL;
    pCurr  = img.ptr<uchar>(0);
    pBelow = img.ptr<uchar>(1);
    
    for (y = 1; y < img.rows-1; ++y) {
        // shift the rows up by one
        pAbove = pCurr;
        pCurr  = pBelow;
        pBelow = img.ptr<uchar>(y+1);
        
        pDst = marker.ptr<uchar>(y);
        
        // initialize col pointers
        no = &(pAbove[0]);
        ne = &(pAbove[1]);
        me = &(pCurr[0]);
        ea = &(pCurr[1]);
        so = &(pBelow[0]);
        se = &(pBelow[1]);
        
        
        for (x = 1; x < img.cols-1; ++x) {
            // shift col pointers left by one (scan left to right)
            nw = no;
            no = ne;
            ne = &(pAbove[x+1]);
            we = me;
            me = ea;
            ea = &(pCurr[x+1]);
            sw = so;
            so = se;
            se = &(pBelow[x+1]);
            
            int A  = (*no == 0 && *ne == 1) + (*ne == 0 && *ea == 1) +
            (*ea == 0 && *se == 1) + (*se == 0 && *so == 1) +
            (*so == 0 && *sw == 1) + (*sw == 0 && *we == 1) +
            (*we == 0 && *nw == 1) + (*nw == 0 && *no == 1);
            int B  = *no + *ne + *ea + *se + *so + *sw + *we + *nw;
            int m1 = iter == 0 ? (*no * *ea * *so) : (*no * *ea * *we);
            int m2 = iter == 0 ? (*ea * *so * *we) : (*no * *so * *we);
            
            if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0)
                pDst[x] = 1;
        }
    }
    
    img &= ~marker;
}
Example No. 17
File: hist.cpp Project: sensq/home
/**
@brief Computes the RGB histograms
@param src Image to compute the histograms from
*/
void HIST::calcHistgram(cv::Mat &src)
{
	if(src.data == NULL)
		return;
	
	// distance between data points on the graph
	step = (double)ui.histgramRGB->width()/256;

	int count[256][3];
	int max = 0;

	// initialization
	for (int i = 0; i < 256; i++)
		for (int c = 0; c < 3; c++)
			count[i][c] = 0;

	// count the occurrences of each intensity for each of R, G, B
	for (int j = 0; j < src.cols; j++)
		for (int i = 0; i < src.rows; i++)
			for (int c = 0; c < 3; c++)
				count[src.data[i * src.step + j * src.elemSize() + c]][c]++;

	// 0 and 255 are usually disproportionately frequent, so ignore them
	for (int c = 0; c < 3; c++)
		count[0][c] = count[255][c] = 0;

	// get the scaling constant (count of the most frequent intensity)
	for (int i = 0; i < 256; i++)
		for (int c = 0; c < 3; c++)
			if(max < count[i][c])
				max = count[i][c];

	// scaling
	double histgram[256][3];
	for (int i = 0; i < 256; i++)
		for (int c = 0; c < 3; c++)
			histgram[i][c] = (double)count[i][c] / max * 200;


	/** Build a simple graph **/
	int gWidth = 256 * step;
	int gHeight = 200;
	// grid image
	cv::Mat baseGraph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
	// per-channel (RGB) histogram images
	cv::Mat rgbGraph[3] = {
		cv::Mat(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255)),
		cv::Mat(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255)),
		cv::Mat(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255))
	};
	// final image to draw: the four above multiply-blended
	cv::Mat graph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));

	// horizontal scale lines
	for (int i = 0; i < 20; i++)
		if(!(i%4))
			cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(180, 180, 180), 2);
		else
			cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(200, 200, 200), 1);
	// vertical scale lines
	for (int i = 0; i < 5; i++)
		cv::line(baseGraph, cv::Point(i*50*step, 0), cv::Point(i*50*step, gHeight), cv::Scalar(180, 180, 180), 2);
	// per-channel graphs
	for (int i = 0; i < 256; i++){
		cv::line(rgbGraph[0], cv::Point(i*step, 0), cv::Point(i*step, (int)histgram[i][0]), cv::Scalar(255, 200, 200), 2);
		cv::line(rgbGraph[1], cv::Point(i*step, 0), cv::Point(i*step, (int)histgram[i][1]), cv::Scalar(200, 255, 200), 2);
		cv::line(rgbGraph[2], cv::Point(i*step, 0), cv::Point(i*step, (int)histgram[i][2]), cv::Scalar(200, 200, 255), 2);
	}
	// polylines
	for (int i = 0; i < 255; i++){
		cv::line(rgbGraph[0], cv::Point(i*step, (int)histgram[i][0]), cv::Point((i+1)*step, (int)histgram[i+1][0]), cv::Scalar(255, 30, 30), 2, CV_AA);
		cv::line(rgbGraph[1], cv::Point(i*step, (int)histgram[i][1]), cv::Point((i+1)*step, (int)histgram[i+1][1]), cv::Scalar(30, 255, 30), 2, CV_AA);
		cv::line(rgbGraph[2], cv::Point(i*step, (int)histgram[i][2]), cv::Point((i+1)*step, (int)histgram[i+1][2]), cv::Scalar(30, 30, 255), 2, CV_AA);
	}
	// composite
	cv::Mat tmp;
	blend(rgbGraph[0], rgbGraph[1], tmp, blendType::MULTI);
	blend(tmp, rgbGraph[2], tmp, blendType::MULTI);
	blend(baseGraph, tmp, graph, blendType::MULTI);
	// flip vertically
	cv::flip(graph, graph, 0);

	drawForQtLabel(graph, ui.histgramRGB, false);
}
Example No. 18
float pixkit::qualityassessment::MSSIM(const cv::Mat &src1, const cv::Mat &src2, int HVSsize, double* lu_co_st)
{
	//////////////////////////////////////////////////////////////////////////
	// exception
	if(src1.empty()||src2.empty()){
		CV_Error(CV_HeaderIsNull,"[qualityassessment::MSSIM] image is empty");
	}
	if(src1.cols != src2.cols || src1.rows != src2.rows){
		CV_Error(CV_StsBadArg,"[qualityassessment::MSSIM] sizes of two images are not equal");
	}
	if(src1.type()!=CV_8U || src2.type()!=CV_8U){
		CV_Error(CV_BadNumChannels,"[qualityassessment::MSSIM] image should be grayscale");
	}

	//////////////////////////////////////////////////////////////////////////
	const int L =255;
	double C1 = (0.01*L)*(0.01*L);		//C1 = (K1*L)^2, K1=0.01, L=255(for 8-bit grayscale)
	double C2 = (0.03*L)*(0.03*L);		//C2 = (K2*L)^2, K2=0.03, L=255(for 8-bit grayscale)
	double C3 = C2 / 2.0;
	int HalfSize = static_cast<int>(HVSsize/2);

	// gaussian filter
	///////////////////////////////////////////////////
	// HVS filter
	std::vector< std::vector<double> > gaussianFilter( HVSsize, std::vector<double>(HVSsize) );
	double sum = 0, STD = 1.5 ;

	for (int i=-HalfSize; i<=HalfSize; i++){
		for (int j=-HalfSize; j<=HalfSize; j++){	
			gaussianFilter[i+HalfSize][j+HalfSize] = exp( -1 * (i*i+j*j) / (2*STD*STD) );
			sum += gaussianFilter[i+HalfSize][j+HalfSize];
		}
	}

	// Normalize to 0~1
	for (int i=-HalfSize; i<=HalfSize; i++){
		for (int j=-HalfSize; j<=HalfSize; j++){	
			gaussianFilter[i+HalfSize][j+HalfSize] /= sum;
		}
	}
	/////////////////////////////////////////////////////

	double luminance=0, contrast=0, structure=0, SSIMresult = 0;

	for (int i=0; i<src1.rows; i++){
		for (int j=0; j<src1.cols; j++){
			double mean_x = 0, mean_y = 0, STDx = 0, STDy = 0, variance_xy = 0;

			// mean
			for (int x=-HalfSize; x<=HalfSize; x++){
				for (int y=-HalfSize; y<=HalfSize; y++){
					if (i+x<0 || j+y<0 || i+x>=src1.rows || j+y>=src1.cols){
						continue;
					} 
					else{
						mean_x += src1.data[(i+x)*src1.cols + (j+y)] * gaussianFilter[x+HalfSize][y+HalfSize];
						mean_y += src2.data[(i+x)*src2.cols + (j+y)] * gaussianFilter[x+HalfSize][y+HalfSize];
					}			
				}
			}			

			// STD
			for (int x=-HalfSize; x<=HalfSize; x++){
				for (int y=-HalfSize; y<=HalfSize; y++){
					if (i+x<0 || j+y<0 || i+x>=src1.rows || j+y>=src1.cols){
						continue;
					} 
					else{
						STDx += ((src1.data[(i+x)*src1.cols + (j+y)] - mean_x) * (src1.data[(i+x)*src1.cols + (j+y)] - mean_x) * gaussianFilter[x+HalfSize][y+HalfSize]);
						STDy += ((src2.data[(i+x)*src2.cols + (j+y)] - mean_y) * (src2.data[(i+x)*src2.cols + (j+y)] - mean_y) * gaussianFilter[x+HalfSize][y+HalfSize]);
						variance_xy += ((src1.data[(i+x)*src1.cols + (j+y)] - mean_x) * (src2.data[(i+x)*src2.cols + (j+y)] - mean_y) * gaussianFilter[x+HalfSize][y+HalfSize]);
					}
				}
			}
			STDx = sqrt(STDx);
			STDy = sqrt(STDy);

			SSIMresult += ((2*mean_x*mean_y + C1) * (2*variance_xy + C2)) / ((mean_x*mean_x + mean_y*mean_y + C1) * (STDx*STDx + STDy*STDy + C2));		
			// for MS_SSIM calculation
			if (lu_co_st != NULL){
				luminance += (2*mean_x*mean_y + C1) / (mean_x*mean_x + mean_y*mean_y + C1);
				contrast += (2*STDx*STDy + C2) / (STDx*STDx + STDy*STDy + C2);
				structure += (variance_xy + C3) / (STDx*STDy + C3);	
			}
		}
	}

	// for MS_SSIM calculation
	if (lu_co_st != NULL){
		lu_co_st[0] = luminance / (src1.rows * src1.cols);
		lu_co_st[1] = contrast / (src1.rows * src1.cols);
		lu_co_st[2] = structure / (src1.rows * src1.cols);
	}
	SSIMresult /= (src1.rows * src1.cols);
	return SSIMresult;			
}
Example No. 19
bool ParallaxErrorAnalysis_colorDiffPixels(const cv::Mat& img1,const shape& img1_shp, const cv::Mat& img2, const shape& img2_shp, const cv::Mat& img2_original,
        const cv::Mat& H, const cv::Size2i& img_size ,double& res_error)
{
    vector<cv::Point2f> overlap_points;


    for(int r = 0 ; r < img_size.height; r++)
    {
        for(int c = 0; c < img_size.width; c++)
        {
            shape t_img1_shape = img1_shp;
            shape t_img2_shape = img2_shp;
            cv::Point2f t_point(c, r);
            if(t_img1_shape.isInShape(c, r) && t_img2_shape.isInShape(c, r) )
            {
                // record these points
                overlap_points.push_back(t_point);
            }
        }
    }

    // find the corresponding points in img2_original
    cv::Mat Hn = H.inv();
    vector<cv::Point2f> correPoints_ori(overlap_points.size());
    cv::perspectiveTransform(overlap_points, correPoints_ori, Hn); // both arguments must be vector<cv::Point2f>, not Point2i

    // check whether any points fall out of bounds
    /*ofstream Dout("2.txt",ios::out);
    for(int i = 0; i < correPoints_ori.size(); i++)
    {
    	if(correPoints_ori[i].x < 0 || correPoints_ori[i].x > img2_original.cols
    		|| correPoints_ori[i].y < 0 || correPoints_ori[i].y > img2_original.rows)
    	{

    		Dout<< correPoints_ori[i].x << " "<<correPoints_ori[i].y << endl;
    	}
    }
    Dout.clear();
    Dout.close();*/

    // compute the error
    res_error = 0;
    cv::Mat img_blend;
    vector<cv::Mat> imgs;
    vector<shape> shapes;
    imgs.push_back(img1);
    imgs.push_back(img2);
    shapes.push_back(img1_shp);
    shapes.push_back(img2_shp);
    blending_all(imgs, shapes, img_size, img_blend);

    double res = 0;
    double Be = 0, Ge = 0, Re = 0;


    for(int i = 0; i < overlap_points.size(); i++)
    {
        Be += abs( img_blend.at<Vec3b>(overlap_points[i])[0] - img2_original.at<Vec3b>(correPoints_ori[i])[0]);
        Ge += abs( img_blend.at<Vec3b>(overlap_points[i])[1] - img2_original.at<Vec3b>(correPoints_ori[i])[1]);
        Re += abs( img_blend.at<Vec3b>(overlap_points[i])[2] - img2_original.at<Vec3b>(correPoints_ori[i])[2]);
    }

    res = Be + Ge + Re;
    res_error = res;


    return true;
}
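For reference (a hedged illustration, not from the original code): cv::perspectiveTransform maps each point through the 3x3 homography and divides by the homogeneous coordinate, which is why both buffers above must hold floating-point points:

std::vector<cv::Point2f> src_pts, dst_pts;
src_pts.push_back(cv::Point2f(10.f, 20.f));
cv::Mat H_demo = cv::Mat::eye(3, 3, CV_64F);        // hypothetical homography (identity)
cv::perspectiveTransform(src_pts, dst_pts, H_demo); // dst_pts[0] == (10, 20) for the identity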
Ejemplo n.º 20
0
float pixkit::qualityassessment::EME(const cv::Mat &src,const cv::Size nBlocks,const short mode){

	//////////////////////////////////////////////////////////////////////////
	// exceptions
	if(src.type()!=CV_8U){
		CV_Assert(false);
	}
	if(nBlocks.width>src.cols||nBlocks.height>src.rows){
		CV_Assert(false);
	}
	if(mode!=1&&mode!=2){
		CV_Assert(false);
	}

	//////////////////////////////////////////////////////////////////////////
	// param
	const	float	c	=	0.0001;

	//////////////////////////////////////////////////////////////////////////
	// define the stepsize
	float	tempv1	=	(float)src.cols/nBlocks.width,
			tempv2	=	(float)src.rows/nBlocks.height;
	if((int)tempv1!=tempv1){
		tempv1	=	(int)	tempv1+1.;
	}
	if((int)tempv2!=tempv2){
		tempv2	=	(int)	tempv2+1.;
	}
	cv::Size	stepsize((int)tempv1,(int)tempv2);

	//////////////////////////////////////////////////////////////////////////
	// estimate
	int		count	=	0;
	float	eme		=	0.;
	for(int i=0;i<src.rows;i+=stepsize.height){
		for(int j=0;j<src.cols;j+=stepsize.width){

			// get local max and min
			float	local_maxv	=	src.data[i*src.cols+j],
					local_minv	=	src.data[i*src.cols+j];		
			if(mode==1){	// standard mode

				for(int m=0;m<stepsize.height;m++){
					for(int n=0;n<stepsize.width;n++){

						if(i+m>=0&&i+m<src.rows&&j+n>=0&&j+n<src.cols){
							if(src.data[(i+m)*src.cols+(j+n)]>local_maxv){
								local_maxv	=	src.data[(i+m)*src.cols+(j+n)];
							}
							if(src.data[(i+m)*src.cols+(j+n)]<local_minv){
								local_minv	=	src.data[(i+m)*src.cols+(j+n)];
							}
						}
					}
				}

			}else if(mode==2){	// BTC's mode

				// find first moment and second moment
				double	moment1=0.,moment2=0.;
				int		count_mom=0;
				for(int m=0;m<stepsize.height;m++){
					for(int n=0;n<stepsize.width;n++){
						if(i+m>=0&&i+m<src.rows&&j+n>=0&&j+n<src.cols){
							moment1+=src.data[(i+m)*src.cols+(j+n)];
							moment2+=src.data[(i+m)*src.cols+(j+n)]*src.data[(i+m)*src.cols+(j+n)];
							count_mom++;
						}						
					}
				}
				moment1/=(double)count_mom;
				moment2/=(double)count_mom;

				// find variance
				double	sd=sqrt(moment2-moment1*moment1);

				// find num of higher than moment1
				int	q=0;
				for(int m=0;m<stepsize.height;m++){
					for(int n=0;n<stepsize.width;n++){
						if(i+m>=0&&i+m<src.rows&&j+n>=0&&j+n<src.cols){
							if(src.data[(i+m)*src.cols+(j+n)]>=moment1){
								q++;
							}
						}
					}
				}
				int		m_q=count_mom-q;
				local_minv=moment1-sd*sqrt((double)q/m_q),
				local_maxv=moment1+sd*sqrt((double)m_q/q);
				if(local_minv>255){
					local_minv=255;
				}
				if(local_minv<0){
					local_minv=0;
				}
				if(local_maxv>255){
					local_maxv=255;
				}
				if(local_maxv<0){
					local_maxv=0;
				}
			}else{
				assert(false);
			}

			// calc EME (Eq. 2) -totally same
			if(local_maxv!=local_minv){
				eme	+=	log((double)local_maxv/(local_minv+c));
			}
			count++;

		}
	}

	return (float)20.*eme/count;
}
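A minimal usage sketch for the EME measure above ("lena.png" is a hypothetical input file, and the pixkit library is assumed to be linked with its qualityassessment header included): the source must be single-channel CV_8U, the block grid must fit inside the image, and mode selects the standard (1) or BTC-based (2) definition.

#include <opencv2/opencv.hpp>
#include <iostream>
// plus the pixkit qualityassessment header (path depends on your pixkit install)

int main(){
	cv::Mat gray = cv::imread("lena.png", 0);	// flag 0 -> force single-channel 8-bit load
	if(gray.empty()) return -1;
	float score = pixkit::qualityassessment::EME(gray, cv::Size(4, 4), 1);	// 4x4 block grid, standard mode
	std::cout << "EME = " << score << std::endl;
	return 0;
}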
 int main(int argc, char* argv[]) {
     // welcome message
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
     std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
     std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
     std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
     std::cout<<"* The retina model still have the following properties:"<<std::endl;
     std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
     std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
     std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
     std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
     std::cout<<"* for more information, reer to the following papers :"<<std::endl;
     std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
     std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
     std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
     std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;
     std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
     std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
     std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
     std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
     std::cout<<"*********************************************************************************"<<std::endl;

     // basic input arguments checking
     if (argc<2)
     {
         help("bad number of parameter");
         return -1;
     }

     bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing

     std::string inputImageName=argv[1];

     //////////////////////////////////////////////////////////////////////////////
     // checking input media type (still image, video file, live video acquisition)
     std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
     // image processing case
     // declare the retina input buffer... it will be fed differently depending on the input media
     inputImage = cv::imread(inputImageName, -1); // flag -1: load the image unchanged (keep original depth and channels)
     std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
     if (!inputImage.total())
     {
         help("could not load image, program end");
         return -1;
     }
     // rescale between 0 and 1
     normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
     cv::Mat gammaTransformedImage;
     cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
     imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
     imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
     if (inputImage.empty())
     {
         help("Input image could not be loaded, aborting");
         return -1;
     }

     //////////////////////////////////////////////////////////////////////////////
     // Program start in a try/catch safety context (Retina may throw errors)
     try
     {
         /* create a retina instance with default parameters setup, uncomment the initialisation you want to test
          * -> if the last parameter is 'log', then activate log sampling (favours foveal vision and subsamples peripheral vision)
          */
         if (useLogSampling)
         {
             retina = cv::createRetina(inputImage.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
         }
         else // -> else allocate "classical" retina :
         {
             retina = cv::createRetina(inputImage.size());
         }

         // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
         retina->write("RetinaDefaultParameters.xml");

         // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
         retina->activateMovingContoursProcessing(false);

         // declare retina output buffers
         cv::Mat retinaOutput_parvo;

         /////////////////////////////////////////////
         // prepare displays and interactions
         histogramClippingValue=0; // default value... updated with interface slider
         //inputRescaleMat = inputImage;
         //outputRescaleMat = imageInputRescaled;
         cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
         cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);

         cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
         colorSaturationFactor=3;
         cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);

         retinaHcellsGain=40;
         cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);

         localAdaptation_photoreceptors=197;
         localAdaptation_Gcells=190;
         cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
         cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);


         /////////////////////////////////////////////
         // apply default parameters of user interaction variables
         rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
         retina->setColorSaturation(true,(float)colorSaturationFactor);
         callBack_updateRetinaParams(1,NULL); // first call for default parameters setup

         // processing loop with stop condition
         bool continueProcessing=true;
         while(continueProcessing)
         {
             // run retina filter
             retina->run(imageInputRescaled);
             // Retrieve and display retina output
             retina->getParvo(retinaOutput_parvo);
             cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
             cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
             cv::waitKey(10);
         }
     }catch(const cv::Exception& e) // catch by const reference to avoid copying the exception
     {
         std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
     }

     // Program end message
     std::cout<<"Retina demo end"<<std::endl;

     return 0;
 }
Ejemplo n.º 22
0
cv::Mat
Auvsi_Recognize::centerBinary( cv::Mat input )
{
	typedef cv::Vec<unsigned char, 1> VT_binary;

	cv::Mat buffered = cv::Mat( input.rows * 2, input.cols * 2, input.type() );
	cv::Mat retVal;

	int centerX, centerY;
	int minX, minY, maxX, maxY;
	int radiusX, radiusY;
	std::vector<int> xCoords;
	std::vector<int> yCoords;

	// Get centroid
	cv::Moments imMoments = cv::moments( input, true );

	centerX = (imMoments.m10 / imMoments.m00) - buffered.cols / 2;
	centerY = (imMoments.m01 / imMoments.m00) - buffered.rows / 2;

	// Get centered x and y coordinates
	cv::MatIterator_<VT_binary> inputIter = input.begin<VT_binary>();
	cv::MatIterator_<VT_binary> inputEnd = input.end<VT_binary>();

	for( ; inputIter != inputEnd; ++inputIter )
	{
		unsigned char value = (*inputIter)[0];
		if( value )
		{
			xCoords.push_back( inputIter.pos().x - centerX );
			yCoords.push_back( inputIter.pos().y - centerY );
		}
	}

	if( xCoords.size() <= 0 || yCoords.size() <= 0 ) // nothing in image
	{
		return input;
	}

	// Get min and max x and y coords (centered)
	minX = *std::min_element( xCoords.begin(), xCoords.end() );
	minY = *std::min_element( yCoords.begin(), yCoords.end() );
	maxX = *std::max_element( xCoords.begin(), xCoords.end() );
	maxY = *std::max_element( yCoords.begin(), yCoords.end() );	

	// Get new centroids
	centerX = getVectorMean<int>( xCoords );
	centerY = getVectorMean<int>( yCoords );

	// Get radius from center in each direction
	radiusX = std::max( abs(maxX - centerX), abs(centerX - minX) );
	radiusY = std::max( abs(maxY - centerY), abs(centerY - minY) );

	// Center image in temporary buffered array
	buffered = cv::Scalar(0); // zero-fill via the C++ API (cvScalar belongs to the legacy C API)

	std::vector<int>::iterator iterX = xCoords.begin();
	std::vector<int>::iterator endX = xCoords.end();
	std::vector<int>::iterator iterY = yCoords.begin();

	for( ; iterX != endX; ++iterX, ++iterY )
	{
		buffered.at<VT_binary>( *iterY, *iterX ) = VT_binary(255);
	}

	// Center image
	buffered = buffered.colRange( centerX - radiusX, centerX + radiusX + 1 );
	buffered = buffered.rowRange( centerY - radiusY, centerY + radiusY + 1 );

	// Add extra padding to make square
	int outH, outW;
	outH = buffered.rows;
	outW = buffered.cols;
	
	if( outH < outW ) // pad height
		cv::copyMakeBorder( buffered, retVal, (outW-outH)/2, (outW-outH)/2, 0, 0, cv::BORDER_CONSTANT, cv::Scalar(0) );
	else // pad width
		cv::copyMakeBorder( buffered, retVal, 0, 0, (outH-outW)/2, (outH-outW)/2, cv::BORDER_CONSTANT, cv::Scalar(0) );

	// Make sure output is desired width
	cv::resize( retVal, buffered, input.size(), 0, 0, cv::INTER_NEAREST );
	
	return buffered;
}
void ControllerImageFusion::windowNormalization(cv::Mat &image, cv::Mat source)
{
    float colRatioDb = (float)image.cols / source.cols; // cast first: integer division would defeat the modf check below
    float rowRatioDb = (float)image.rows / source.rows;
    float val;
    if(std::modf(colRatioDb, &val)!= 0 || std::modf(rowRatioDb, &val) != 0)
    {
        return;
    }
    int colRatio = (int)(colRatioDb);
    int rowRatio = (int)(rowRatioDb);
    if(colRatio<=1 || rowRatio<=1)
    {
        return;
    }
    cv::Mat sourceConverted;
    source.convertTo(sourceConverted, CV_32F);
    int rows = image.rows;
    int cols = image.cols;
    for(int i = 0; i<=rows-rowRatio; i++)
    {
        for(int j = 0; j<=cols-colRatio; j++)
        {
            cv::Mat temp = cv::Mat(image, cv::Rect(j, i, colRatio, rowRatio));
            cv::Scalar mean = cv::mean(temp);
            div_t  rowDiv = std::div(i, rowRatio);
            div_t  colDiv = std::div(j, colRatio);
            cv::Mat sourceTemp;
            // if we have reached the edge, do not go outside of the image
            int roiCols=2, roiRows=2;
            if(i==rows-rowRatio)
            {
               roiRows = 1;
            }
            if(j==cols-colRatio)
            {
                roiCols = 1;
            }
            cv::Mat sourceRoi = cv::Mat(sourceConverted, cv::Rect(colDiv.quot, rowDiv.quot, roiCols, roiRows));
            sourceRoi.copyTo(sourceTemp);

            sourceTemp.at<float>(0, 0) = sourceTemp.at<float>(0, 0)*(colRatio-colDiv.rem)*(rowRatio-rowDiv.rem);
            if(roiRows>1)
            {
                sourceTemp.at<float>(1, 0) = sourceTemp.at<float>(1, 0)*(colRatio-colDiv.rem)*(rowDiv.rem);
            }
            if(roiCols>1)
            {
                sourceTemp.at<float>(0, 1) = sourceTemp.at<float>(0, 1)*(colDiv.rem)*(rowRatio-rowDiv.rem);
            }
            if(roiCols>1 && roiRows>1)
            {
                sourceTemp.at<float>(1, 1) = sourceTemp.at<float>(1, 1)*(colDiv.rem)*(rowDiv.rem);
            }

            sourceTemp = sourceTemp*(1.0/(rowRatio*colRatio));
            cv::Scalar sourceMean = cv::sum(sourceTemp);

            mean.val[0] = sourceMean.val[0]-mean.val[0];
            cv::add(temp, mean, temp);
        }
    }
}
Ejemplo n.º 24
0
bool task3_5(const cv::Mat& image, const cv::Mat& orig) {
    cv::Mat grey, tmp, res;
    image.copyTo(grey);
    grey.convertTo(grey, CV_32F);

    grey.copyTo(res);
    res.convertTo(res, CV_8U);
    std::vector<cv::Mat> planes(2, cv::Mat());
    std::vector<cv::Mat> polar(2, cv::Mat());

    cv::dft(grey, tmp, cv::DFT_COMPLEX_OUTPUT);
    cv::split(tmp, planes);
    cv::cartToPolar(planes[0], planes[1], polar[0], polar[1]);

    int cx = polar[0].cols / 2;
    int cy = polar[0].rows / 2;
    cv::Point max;

    cv::Mat top = polar[0].rowRange(0, cx); // note: splits rows at cols/2 (cx), not rows/2 (cy); equivalent only for square spectra
    cv::Mat bot = polar[0].rowRange(cx, polar[0].rows);

    int row = 0;
    do {
        cv::minMaxLoc(top.rowRange(row++, top.rows), 0, 0, 0, &max);
    } while (max.x == 0);


    int r = 3;

    cv::Mat noiseCol = polar[0].colRange(max.x - r, max.x + r);
    cv::Mat noiseRow = polar[0].rowRange(max.y - r, max.y + r);
    cv::Mat blurCol = polar[0].colRange(max.x - 12, max.x - 12 + 2 * r);
    cv::Mat blurRow = polar[0].rowRange(max.y - 3 * r, max.y - r);

    blurCol.copyTo(noiseCol);
    blurRow.copyTo(noiseRow);


    cv::Mat noiseColB = polar[0].colRange(polar[0].cols - max.x - r, polar[0].cols - max.x + r);
    cv::Mat noiseRowB = polar[0].rowRange(polar[0].rows - max.y - r, polar[0].rows - max.y + r);

    blurCol.copyTo(noiseColB);
    blurRow.copyTo(noiseRowB);

    cv::Mat roi = polar[0];
    cv::Mat mean, stddev, tmp1;
    roi = roi.colRange(max.x + 20, roi.cols - max.x - 20).rowRange(max.y + 20, roi.rows - max.y - 20); // rows (not cols) bounds the row range
    for (int i = 0; i < roi.rows; ++i) {
        cv::Mat row = roi.row(i);
        cv::meanStdDev(row, mean, stddev);
        float m = mean.at<double>(0, 0);
        float st = stddev.at<double>(0, 0);
        for (Mfit mfit = row.begin<float>(); mfit != row.end<float>(); ++mfit) { // Mfit: presumably a typedef for cv::MatIterator_<float>
            if (*mfit > m + 1.5 * st) {
                *mfit = 0.5 * m;
            }
        }
    }
    visualization(polar[0], tmp);

    //    cv::namedWindow("Lesson 2", CV_WINDOW_NORMAL);
    //    cv::imshow("Lesson 2", tmp);
    //    cv::waitKey(0);


    cv::polarToCart(polar[0], polar[1], planes[0], planes[1]);
    cv::merge(planes, tmp);
    cv::dft(tmp, tmp, cv::DFT_SCALE | cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);

    tmp.convertTo(tmp, CV_8U);

    cv::Mat lut(1, 256, CV_32F, cv::Scalar(0));
    for (int i = 0; i < 256; ++i) {
        lut.at<float>(0, i) = i;
    }

    for (int i = 65; i < 200; ++i) {
        lut.at<float>(0, i) = i - 30;
    }
    
    for (int i = 200; i < 220; ++i) {
        lut.at<float>(0, i) = i - 20;
    }
    
    lut.convertTo(lut, CV_8U);

    cv::normalize(tmp, tmp, 0, 255, cv::NORM_MINMAX);
    
    cv::LUT(tmp, lut, tmp);


    cv::GaussianBlur(tmp, tmp, cv::Size(3, 3), 1);
    cv::medianBlur(tmp, tmp, 3);
    cv::Mat result;
    cv::matchTemplate(orig, tmp, result, CV_TM_SQDIFF);
    std::cout << "RMSE Task 3.5: " << result / (orig.cols * orig.rows) << std::endl;

    concatImages(res, tmp, res);
    cv::absdiff(tmp, orig, tmp);
    concatImages(res, tmp, res);

    cv::absdiff(image, orig, tmp);
    concatImages(res, tmp, res);
    concatImages(res, orig, res);

//    cv::namedWindow("Lesson 2", CV_WINDOW_NORMAL);
//    cv::imshow("Lesson 2", res);
//    cv::waitKey(0);

    return cv::imwrite(PATH + "Task3_5.jpg", res);
}
Ejemplo n.º 25
0
vector<cv::Mat> UVDisparity::Pitch_Classify(cv::Mat &xyz,cv::Mat& ground_mask)
{
   vector<cv::Mat> pitch_measure;

   cv::Mat pitch1;
   pitch1.create(1,1,CV_32F);
   cv::Mat pitch2;
   pitch2.create(1,1,CV_32F);

   cv::Mat bin,dst,dst1;

  GaussianBlur(v_dis_,dst,Size(3,3),0,0);

  Mat element = getStructuringElement(MORPH_RECT,Size(3,3),Point(1,1));
  erode(dst,dst1,element);

  cv::threshold(dst1,bin,0,255,THRESH_OTSU);

  //cv::imshow("color",bin);
  //cv::waitKey(0);
    
  std::vector<Point> pt_list;
  //select the points to estimate line function
  for(int i = 26;i < bin.cols;i++)
  {
    for(int j = bin.rows-1;j>=0;j--)
    {
      int v = bin.at<uchar>(j,i);
      if(v == 255)
      {
        //cout<<"the lowest pixel is: "<<i<<","<<j<<endl;
        pt_list.push_back(cv::Point(i,j));

        for(int k = j; k > max(j-30,0); k--)
        {
            int v_a = bin.at<uchar>(k,i);
            if(v_a == 255)
            {
                pt_list.push_back(cv::Point(i,k));

            }
        }
        
        break;
      }
    }
  }

  std::vector<Point> pt_list2;
  //select the points to estimate line function
  for(int i = 12;i < 26;i++)
  {
    for(int j = bin.rows-1;j>=0;j--)
    {
      int v = bin.at<uchar>(j,i);
      if(v == 255)
      {
        pt_list2.push_back(cv::Point(i,j));
        break;
      }
    }
  }

  //cout<<"length of list is: "<<pt_list.size()<<endl;
  Vec4f line1;
  Vec4f line2;


  //fitting line function
  cv::fitLine(pt_list,line1,CV_DIST_L2,0,0.01,0.01);
  cv::fitLine(pt_list,line2,CV_DIST_L2,0,0.01,0.01); // NOTE: refits pt_list, so line2 == line1; the near-range fit on pt_list2 below is disabled
  //cv::fitLine(pt_list2,line2,CV_DIST_L2,0,0.01,0.01);

  float a = line1[0];float b = line1[1];
  int x0 = cvRound(line1[2]);int y0 = cvRound(line1[3]);

  float a2 = line2[0];float b2 = line2[1];
  int x2 = cvRound(line2[2]);int y2 = cvRound(line2[3]);

  double V_C = y0 - (b/a)*x0;
  double V_C2 = y2 - (b2/a2)*x2;


  vector<Vec4i> lines;
  double V_0 = calib_.c_y;
  double F = calib_.f;

  double theta = atan((V_0-V_C)/F);
  double theta2 = atan((V_0-V_C2)/F);

  cv::line(v_dis_show,cv::Point(0,(b2/a2)*0+V_C2),cv::Point(26,(b2/a2)*26+V_C2),cv::Scalar(0,255,0),2,8);
  cv::line(v_dis_show,cv::Point(0,(b2/a2)*0+V_C2-20),cv::Point(26,(b2/a2)*26+V_C2-20),cv::Scalar(0,0,255),2,8);

  cv::line(v_dis_show,cv::Point(26,(b/a)*26+V_C),cv::Point(100,(b/a)*100+V_C),cv::Scalar(255,0,0),2,8);
  cv::line(v_dis_show,cv::Point(26,(b/a)*26+V_C-20),cv::Point(100,(b/a)*100+V_C-20),cv::Scalar(0,0,255),2,8);

  pitch1.at<float>(0)=theta;
  pitch2.at<float>(0)=theta2;

  //classify the points on ground plane and obstacles with respect to its distance to the line in V-disparity
 int xyz_cols = xyz.cols;
 int xyz_rows = xyz.rows;

  for(int j = 0; j < xyz_rows; j++)
  {

      float* xyz_ptr = xyz.ptr<float>(j);
      for(int i = 0;i < xyz_cols; i++)
      {

           float v = xyz_ptr[10*i + 4];
           float d = xyz_ptr[10*i + 5];
           int intensity = cvRound(xyz_ptr[10*i + 6]);
           float distance = (v-(b/a)*d-V_C);

           if(d > 8.0f) // the two original branches (d > 26 and 8 < d < 26.1) had identical bodies
           {
               if(distance > -14.0f)
               {
                   xyz_ptr[10*i+9] = 0.0f; //ground point: zero the intensity
               }
               else
               {
                   xyz_ptr[10*i+9] = abs(intensity);
               }
           }
           else
           {
               xyz_ptr[10*i+9] = 0; //too close: zero the intensity
           }

      }


  }


  vector<Mat> channels;
  // split the 10-channel xyz matrix into single-channel planes
  // (cv::split resizes the output vector to match xyz.channels())
  split(xyz, channels);
  // channel 9 holds the intensity/ground mask written above
  cv::Mat ch9 = channels[9];
  ground_mask.create(ch9.size(),CV_8UC1);
  cv::convertScaleAbs(ch9,ground_mask);

  pitch_measure.push_back(pitch1);
  pitch_measure.push_back(pitch2);

  return pitch_measure;
 }
Ejemplo n.º 26
0
void scaleImage(const cv::Mat& src, cv::Mat& dst, double alpha, double beta) {
    CV_Assert(src.channels() == 3);

    dst = alpha * src + cv::Scalar(beta, beta, beta);
}
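As a quick usage note (a hedged sketch, not part of the original example): the function above is the linear point operation dst = alpha*src + beta applied per channel, and because src is CV_8UC3 the matrix expression saturates each result to [0, 255]:

cv::Mat src = cv::imread("input.jpg"); // hypothetical 3-channel image
cv::Mat dst;
scaleImage(src, dst, 1.2, 15.0);       // ~20% more contrast, +15 brightness, saturated per pixel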
Ejemplo n.º 27
0
	//
	//Write an image to the stream
	void ImageSequenceIO::WriteImageToStream(const cv::Mat &image, const int frameId)
	{
		m_pState->m_ofs.write((const char*)&frameId,sizeof(int)); // write the frame id passed in; the original wrote the reader-side counter and ignored the parameter
		m_pState->m_ofs.write((const char*)image.ptr(),m_pState->m_writeHeader.totalSize());
	}
Ejemplo n.º 28
0
void CVThread::setImage(const cv::Mat& img)
{
	m_frames.m_result = m_frames.m_inFrame = img.clone();
	
	emit imageChanged(); 
}
Ejemplo n.º 29
0
void DataTransformer<Dtype>::Transform(const cv::Mat& cv_img,
                                       Blob<Dtype>* transformed_blob,
                                       bool preserve_pixel_vals) {
  const int img_channels = cv_img.channels();
  const int img_height = cv_img.rows;
  const int img_width = cv_img.cols;

  // Check dimensions.
  const int channels = transformed_blob->channels();
  const int height = transformed_blob->height();
  const int width = transformed_blob->width();
  const int num = transformed_blob->num();

  CHECK_EQ(channels, img_channels);
  CHECK_LE(height, img_height);
  CHECK_LE(width, img_width);
  CHECK_GE(num, 1);

  CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte";
  const int crop_size = param_.crop_size();
  const Dtype scale = preserve_pixel_vals ? 1 : param_.scale();
  const bool do_mirror = param_.mirror() && Rand(2);
  const bool has_mean_file = param_.has_mean_file();
  const bool has_mean_values = mean_values_.size() > 0;

  CHECK_GT(img_channels, 0);
  CHECK_GE(img_height, crop_size);
  CHECK_GE(img_width, crop_size);

  Dtype* mean = NULL;
  if (has_mean_file && !preserve_pixel_vals) {
    CHECK_EQ(img_channels, data_mean_.channels());
    CHECK_EQ(img_height, data_mean_.height());
    CHECK_EQ(img_width, data_mean_.width());
    mean = data_mean_.mutable_cpu_data();
  }
  if (has_mean_values && !preserve_pixel_vals) {
    CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) <<
     "Specify either 1 mean_value or as many as channels: " << img_channels;
    if (img_channels > 1 && mean_values_.size() == 1) {
      // Replicate the mean_value for simplicity
      for (int c = 1; c < img_channels; ++c) {
        mean_values_.push_back(mean_values_[0]);
      }
    }
  }

  int h_off = 0;
  int w_off = 0;
  cv::Mat cv_cropped_img = cv_img;
  if (crop_size) {
    CHECK_EQ(crop_size, height);
    CHECK_EQ(crop_size, width);
    // We only do random crop when we do training.
    if (phase_ == TRAIN) {
      h_off = Rand(img_height - crop_size + 1);
      w_off = Rand(img_width - crop_size + 1);
    } else {
      h_off = (img_height - crop_size) / 2;
      w_off = (img_width - crop_size) / 2;
    }
    cv::Rect roi(w_off, h_off, crop_size, crop_size);
    cv_cropped_img = cv_img(roi);
  } else {
    CHECK_EQ(img_height, height);
    CHECK_EQ(img_width, width);
  }

  CHECK(cv_cropped_img.data);

  Dtype* transformed_data = transformed_blob->mutable_cpu_data();
  int top_index;
  for (int h = 0; h < height; ++h) {
    const uchar* ptr = cv_cropped_img.ptr<uchar>(h);
    int img_index = 0;
    for (int w = 0; w < width; ++w) {
      for (int c = 0; c < img_channels; ++c) {
        if (do_mirror) {
          top_index = (c * height + h) * width + (width - 1 - w);
        } else {
          top_index = (c * height + h) * width + w;
        }
        // int top_index = (c * height + h) * width + w;
        Dtype pixel = static_cast<Dtype>(ptr[img_index++]);
        if (has_mean_file && !preserve_pixel_vals) {
          int mean_index = (c * img_height + h_off + h) * img_width + w_off + w;
          transformed_data[top_index] =
            (pixel - mean[mean_index]) * scale;
        } else {
          if (has_mean_values && !preserve_pixel_vals) {
            transformed_data[top_index] =
              (pixel - mean_values_[c]) * scale;
          } else {
            transformed_data[top_index] = pixel * scale;
          }
        }
      }
    }
  }
}
Ejemplo n.º 30
0
	void ScaleMaps::run(const cv::Mat& img, const vector<FeatureType>& features,
		vector<float>& scaleMap)
	{
		size_t r, c, i, j, nr, nc, min_row, max_row, min_col, max_col;
		size_t neighborCount, coefficientCount = 0;
		std::vector<Eigen::Triplet<float>> coefficients;	// list of non-zeros coefficients
		vector<float> weights(9);
		float m, v, sum;
		size_t pixels = img.total();
		int index = -1, nindex;


		// Initialization
		vector<bool> scale(pixels, false);
		for (i = 0; i < features.size(); ++i)
		{
			const FeatureType& feat = features[i];
			scale[((int)feat.y)*img.cols + (int)feat.x] = true;
		}

		// Adjust image values
		cv::Mat scaledImg = img.clone();
		scaledImg += 1;
		scaledImg *= (1 / 32.0f);

		// For each pixel in the image
		for (r = 0; r < scaledImg.rows; ++r)
		{
			min_row = (size_t)std::max(int(r - 1), 0);
			max_row = (size_t)std::min(scaledImg.rows - 1, int(r + 1));
			for (c = 0; c < scaledImg.cols; ++c)
			{
				// Increment pixel index
				++index;

				// If this is not a feature point
				if (!scale[index])
				{
					min_col = (size_t)std::max(int(c - 1), 0);
					max_col = (size_t)std::min(scaledImg.cols - 1, int(c + 1));
					neighborCount = 0;

					// Loop over 3x3 neighborhoods matrix
					// and calculate the variance of the intensities
					for (nr = min_row; nr <= max_row; ++nr)
					{
						for (nc = min_col; nc <= max_col; ++nc)
						{
							if (nr == r && nc == c) continue;
							weights[neighborCount++] = scaledImg.at<float>(nr, nc);
						}
					}
					weights[neighborCount] = scaledImg.at<float>(r, c);

					// Calculate the weights statistics
					getStat(weights, neighborCount + 1, m, v);
					m *= 0.6;
					if (v < EPS) v = EPS;	// Avoid division by 0

					// Apply weight function
					mWeightFunc(weights, neighborCount, scaledImg.at<float>(r, c), m, v);

					// Normalize the weights and set to coefficients
					sum = std::accumulate(weights.begin(), weights.begin() + neighborCount, 0.0f);
					i = 0;
					for (nr = min_row; nr <= max_row; ++nr)
					{
						for (nc = min_col; nc <= max_col; ++nc)
						{
							if (nr == r && nc == c) continue;
							nindex = nr*scaledImg.cols + nc;
							coefficients.push_back(Eigen::Triplet<float>(
								index, nindex, -weights[i++] / sum));
						}
					}
				}

				// Add center coefficient
				coefficients.push_back(Eigen::Triplet<float>(index, index, 1));
			}
		}

		// Build right side equation vector
		Eigen::VectorXf b = Eigen::VectorXf::Zero(pixels);
		for (i = 0; i < features.size(); ++i)
		{
			const FeatureType& feat = features[i];
			b[((int)feat.y)*scaledImg.cols + (int)feat.x] = feat.scale;
		}

		// Build left side equation matrix
		Eigen::SparseMatrix<float> A(pixels, pixels);
		A.setFromTriplets(coefficients.begin(), coefficients.end());

		/*/// Debug ///
		std::ofstream file("Output.m");
		cv::Mat_<int> rows(1, coefficients.size()), cols(1, coefficients.size());
		cv::Mat_<float> values(1, coefficients.size());
		for (i = 0; i < coefficients.size(); ++i)
		{
		rows.at<int>(i) = coefficients[i].row();
		cols.at<int>(i) = coefficients[i].col();
		values.at<float>(i) = coefficients[i].value();
		}
		file << "cpp_rows = " << rows << ";" << std::endl;
		file << "cpp_cols = " << cols << ";" << std::endl;
		file << "cpp_values = " << values << ";" << std::endl;
		/////////////*/

		// Solving
		Eigen::SparseLU<Eigen::SparseMatrix<float>> slu(A);
		Eigen::VectorXf x = slu.solve(b);

		// Copy to output
		scaleMap.resize(pixels);
		memcpy(scaleMap.data(), x.data(), pixels*sizeof(float));
	}
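For context (a standalone sketch, not from the original): the solve step above is a plain Eigen sparse LU factorization. Isolated, with a hypothetical 2x2 diagonal system, the pattern looks like this:

#include <Eigen/Sparse>
#include <vector>

int main(){
	// hypothetical system: A = diag(2, 3), b = (4, 9), so x = (2, 3)
	std::vector<Eigen::Triplet<float>> triplets;
	triplets.push_back(Eigen::Triplet<float>(0, 0, 2.0f));
	triplets.push_back(Eigen::Triplet<float>(1, 1, 3.0f));
	Eigen::SparseMatrix<float> A(2, 2);
	A.setFromTriplets(triplets.begin(), triplets.end());
	Eigen::VectorXf b(2);
	b << 4.0f, 9.0f;
	Eigen::SparseLU<Eigen::SparseMatrix<float>> solver(A);	// analyze + factorize
	Eigen::VectorXf x = solver.solve(b);					// back-substitute
	return (x - Eigen::Vector2f(2.0f, 3.0f)).norm() < 1e-6f ? 0 : 1;
}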