Example #1
void Classifier::Preprocess(const cv::Mat& img,
                            std::vector<cv::Mat>* input_channels) {
  /* Convert the input image to the input image format of the network. */
  cv::Mat sample;
  if (img.channels() == 3 && num_channels_ == 1)
    cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
  else if (img.channels() == 4 && num_channels_ == 1)
    cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
  else if (img.channels() == 4 && num_channels_ == 3)
    cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
  else if (img.channels() == 1 && num_channels_ == 3)
    cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
  else
    sample = img;

  cv::Mat sample_resized;
  if (sample.size() != input_geometry_)
    cv::resize(sample, sample_resized, input_geometry_);
  else
    sample_resized = sample;

  cv::Mat sample_float;
  if (num_channels_ == 3)
    sample_resized.convertTo(sample_float, CV_32FC3);
  else
    sample_resized.convertTo(sample_float, CV_32FC1);

  cv::Mat sample_normalized;
  cv::subtract(sample_float, mean_, sample_normalized);

  /* This operation will write the separate BGR planes directly to the
   * input layer of the network because it is wrapped by the cv::Mat
   * objects in input_channels. */
  cv::split(sample_normalized, *input_channels);

  CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
        == net_->input_blobs()[0]->cpu_data())
    << "Input channels are not wrapping the input layer of the network.";
}
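The final CHECK assumes the cv::Mat channel wrappers were created beforehand. For context, this snippet pairs with the following companion function from Caffe's cpp_classification example, reproduced here as a sketch:

void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
  Blob<float>* input_layer = net_->input_blobs()[0];
  int width = input_layer->width();
  int height = input_layer->height();
  float* input_data = input_layer->mutable_cpu_data();
  for (int i = 0; i < input_layer->channels(); ++i) {
    /* Each cv::Mat aliases one channel plane of the input blob, so writing
     * through it (e.g. via cv::split above) fills the network input directly. */
    cv::Mat channel(height, width, CV_32FC1, input_data);
    input_channels->push_back(channel);
    input_data += width * height;
  }
}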
Example #2
void Camera::toGrayScaleMat(cv::Mat& m)
{   
    unsigned char *m_ = (unsigned char *)(m.data);
    
    for (int x = 0; x < w2; x++) {
        for (int y = 0; y < height; y++) {
            // Each 4-byte group in the source buffer appears to hold two luma
            // samples (YUYV-style packing): Y0 at offset 0, Y1 at offset 2.
            int i = (y * w2 + x) * 4;
            int y0 = data[i];
            int y1 = data[i + 2];

            i = y * m.cols * m.channels() + x * 2 * m.channels();
            m_[i + 0] = (unsigned char)(y0);
            m_[i + 1] = (unsigned char)(y1);
            
            //Clearer in logic, but a slower alternative:
            //m.at<uchar>(y, (2*x)) = (unsigned char) (y0);
            //m.at<uchar>(y, (2*x)+1) = (unsigned char) (y1);
        }
    }
}
QImage Tools::Mat2QImage(cv::Mat const& src)
{
  QImage dest(src.cols, src.rows, QImage::Format_ARGB32);
  const float scale = 255.0f;
  if (src.depth() == CV_8U) {
    if (src.channels() == 1) {
      for (int i = 0; i < src.rows; ++i) {
        for (int j = 0; j < src.cols; ++j) {
          int level = src.at<quint8>(i, j);
          dest.setPixel(j, i, qRgb(level, level, level));
        }
      }
    } else if (src.channels() == 3) {
      for (int i = 0; i < src.rows; ++i) {
        for (int j = 0; j < src.cols; ++j) {
          cv::Vec3b bgr = src.at<cv::Vec3b>(i, j);
          dest.setPixel(j, i, qRgb(bgr[2], bgr[1], bgr[0]));
        }
      }
    }
  } else if (src.depth() == CV_32F) {
    if (src.channels() == 1) {
      for (int i = 0; i < src.rows; ++i) {
        for (int j = 0; j < src.cols; ++j) {
          int level = scale * src.at<float>(i, j);
          dest.setPixel(j, i, qRgb(level, level, level));
        }
      }
    } else if (src.channels() == 3) {
      for (int i = 0; i < src.rows; ++i) {
        for (int j = 0; j < src.cols; ++j) {
          cv::Vec3f bgr = scale * src.at<cv::Vec3f>(i, j);
          dest.setPixel(j, i, qRgb(bgr[2], bgr[1], bgr[0]));
        }
      }
    }
  }
  return dest;
}
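Per-pixel setPixel calls are convenient but slow. For the common CV_8UC3 case, a hedged alternative (Mat2QImageFast is a hypothetical helper, not part of the original Tools class) converts BGR to RGB once and lets QImage deep-copy the buffer:

QImage Mat2QImageFast(const cv::Mat& bgr)
{
  CV_Assert(bgr.type() == CV_8UC3);
  cv::Mat rgb;
  cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);
  // QImage does not take ownership of the buffer, so copy() before rgb dies.
  return QImage(rgb.data, rgb.cols, rgb.rows, static_cast<int>(rgb.step),
                QImage::Format_RGB888).copy();
}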
Example #4
void NccHisto::getProba(const cv::Mat ncc, const cv::Mat sumstdev, cv::Mat proba)
{
	if (lut==0) loadHistogram();
	assert(lut);
	if (lut==0) return;
	assert(ncc.channels()==1);
	assert(sumstdev.channels()==1);
	assert(ncc.cols == proba.cols && ncc.rows==proba.rows);
	assert(proba.channels()==1);

	const int w=ncc.cols;
	const int h=ncc.rows;

	for (int y=0; y<h;y++) {
		float *dst = proba.ptr<float>(y);
		const float *src = ncc.ptr<float>(y);
		const float *sum = sumstdev.ptr<float>(y);
		for (int x=0;x<w;x++) {
			dst[x] = lut[lut_idx(src[x], sum[x])];
		}
	}
}
Example #5
cv::Mat crosscorrelation(const cv::Mat& A, const cv::Mat& B)
{
  if (A.channels() != 2 || B.channels() != 2)
  {
    throw CustomException("crosscorrelation: Must be two two-channel images.");
  }

  cv::Mat aPadded;
  cv::Mat bPadded;
  cv::Mat doubleA, doubleB;
  /* The following would expand the image to an optimal size to make the
   * Fourier transform efficient. It is not used right now, since the image
   * is double-sized anyway to apply cross-correlation properly.
  int m = getOptimalDFTSize( A.rows );
  int n = getOptimalDFTSize( A.cols ); // on the border add zero values
  copyMakeBorder(A, aPadded, 0, m - A.rows, 0, n - A.cols, BORDER_CONSTANT, Scalar(0,0));
  copyMakeBorder(B, bPadded, 0, m - B.rows, 0, n - B.cols, BORDER_CONSTANT, Scalar(0,0));
  */
  //REMEMBER!!  The result of the correlation is twice the size of the input arrays!
  //There must be enough space for the overlapping of both functions
  cv::copyMakeBorder(A, aPadded, 0, A.rows, 0, A.cols, cv::BORDER_CONSTANT, cv::Scalar(0.0, 0.0));
  cv::copyMakeBorder(B, bPadded, 0, B.rows, 0, B.cols, cv::BORDER_CONSTANT, cv::Scalar(0.0, 0.0));

  //CAUTION!! Know differences between: DFT_COMPLEX_OUTPUT, DFT_SCALE, DFT_REAL_OUTPUT
  cv::dft(aPadded, aPadded, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
  cv::dft(bPadded, bPadded, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);

  cv::Mat C, tmpC;
  //CAUTION!! Know differences between: DFT_COMPLEX_OUTPUT, DFT_SCALE, DFT_REAL_OUTPUT
  bool conjugateB(true);  // optional mulSpectrums parameter (false by default); set to false to turn the operation into a convolution
  cv::mulSpectrums(aPadded, bPadded.mul(aPadded.rows * aPadded.cols), tmpC, cv::DFT_COMPLEX_OUTPUT, conjugateB);

  //Note: neither dft nor idft scales the result by default, so pass DFT_SCALE
  //to exactly one of them to make the two transforms mutually inverse.
  cv::idft(tmpC, C, cv::DFT_COMPLEX_OUTPUT);

  //Remember to normalize: when calculating the OTF, the value at the origin must equal unity (energy conservation).
  return C;
}
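crosscorrelation() insists on two-channel (complex) input, so a real image has to be packed first. A minimal usage sketch, with an arbitrary 64x64 test pattern:

// Pack a real CV_32FC1 image into the two-channel complex layout expected
// by crosscorrelation(), then autocorrelate it.
cv::Mat real = cv::Mat::eye(64, 64, CV_32FC1);  // arbitrary test pattern
cv::Mat planes[] = { real, cv::Mat::zeros(real.size(), CV_32FC1) };
cv::Mat complexImg;
cv::merge(planes, 2, complexImg);
cv::Mat corr = crosscorrelation(complexImg, complexImg);  // autocorrelation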
void MeanTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);
    assert(m_meanImg.size() == cv::Size(0, 0) ||
           (m_meanImg.size() == mat.size() &&
            m_meanImg.channels() == mat.channels()));

    // REVIEW alexeyk: check type conversion (float/double).
    if (m_meanImg.size() == mat.size())
    {
        mat = mat - m_meanImg;
    }
}
Example #7
void cv::DisplayHelper::display(const cv::Mat& oInputImg, const cv::Mat& oDebugImg, const cv::Mat& oOutputImg, size_t nIdx) {
    CV_Assert(!oInputImg.empty() && (oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3 || oInputImg.type()==CV_8UC4));
    CV_Assert(!oDebugImg.empty() && (oDebugImg.type()==CV_8UC1 || oDebugImg.type()==CV_8UC3 || oDebugImg.type()==CV_8UC4) && oDebugImg.size()==oInputImg.size());
    CV_Assert(!oOutputImg.empty() && (oOutputImg.type()==CV_8UC1 || oOutputImg.type()==CV_8UC3 || oOutputImg.type()==CV_8UC4) && oOutputImg.size()==oInputImg.size());
    cv::Mat oInputImgBYTE3, oDebugImgBYTE3, oOutputImgBYTE3;
    if(oInputImg.channels()==1)
        cv::cvtColor(oInputImg,oInputImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oInputImg.channels()==4)
        cv::cvtColor(oInputImg,oInputImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oInputImgBYTE3 = oInputImg;
    if(oDebugImg.channels()==1)
        cv::cvtColor(oDebugImg,oDebugImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oDebugImg.channels()==4)
        cv::cvtColor(oDebugImg,oDebugImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oDebugImgBYTE3 = oDebugImg;
    if(oOutputImg.channels()==1)
        cv::cvtColor(oOutputImg,oOutputImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oOutputImg.channels()==4)
        cv::cvtColor(oOutputImg,oOutputImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oOutputImgBYTE3 = oOutputImg;
    cv::Size oCurrDisplaySize;
    if(m_oMaxDisplaySize.area()>0 && (oOutputImgBYTE3.cols>m_oMaxDisplaySize.width || oOutputImgBYTE3.rows>m_oMaxDisplaySize.height)) {
        if(oOutputImgBYTE3.cols>m_oMaxDisplaySize.width && oOutputImgBYTE3.cols>oOutputImgBYTE3.rows)
            oCurrDisplaySize = cv::Size(m_oMaxDisplaySize.width,(m_oMaxDisplaySize.width*oOutputImgBYTE3.rows)/oOutputImgBYTE3.cols);
        else
            oCurrDisplaySize = cv::Size((m_oMaxDisplaySize.height*oOutputImgBYTE3.cols)/oOutputImgBYTE3.rows,m_oMaxDisplaySize.height);
        cv::resize(oInputImgBYTE3,oInputImgBYTE3,oCurrDisplaySize);
        cv::resize(oDebugImgBYTE3,oDebugImgBYTE3,oCurrDisplaySize);
        cv::resize(oOutputImgBYTE3,oOutputImgBYTE3,oCurrDisplaySize);
    }
    else
        oCurrDisplaySize = oOutputImgBYTE3.size();
    std::stringstream sstr;
    sstr << "Input #" << nIdx;
    putText(oInputImgBYTE3,sstr.str(),cv::Scalar_<uchar>(0,0,255));
    putText(oDebugImgBYTE3,"Debug",cv::Scalar_<uchar>(0,0,255));
    putText(oOutputImgBYTE3,"Output",cv::Scalar_<uchar>(0,0,255));
    if(m_bFirstDisplay) {
        putText(oDebugImgBYTE3,"[Press space to continue]",cv::Scalar_<uchar>(0,0,255),true,cv::Point2i(oDebugImgBYTE3.cols/2-100,15),1,1.0);
        m_bFirstDisplay = false;
    }
    std::lock_guard<std::mutex> oLock(m_oEventMutex);
    const cv::Point2i& oDbgPt = m_oLatestMouseEvent.oPosition;
    const cv::Size& oLastDbgSize = m_oLatestMouseEvent.oDisplaySize;
    if(oDbgPt.x>=0 && oDbgPt.y>=0 && oDbgPt.x<oLastDbgSize.width*3 && oDbgPt.y<oLastDbgSize.height) {
        const cv::Point2i oDbgPt_rescaled(int(oCurrDisplaySize.width*(float(oDbgPt.x%oLastDbgSize.width)/oLastDbgSize.width)),int(oCurrDisplaySize.height*(float(oDbgPt.y)/oLastDbgSize.height)));
        cv::circle(oInputImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
        cv::circle(oDebugImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
        cv::circle(oOutputImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
    }
    cv::Mat displayH;
    cv::hconcat(oInputImgBYTE3,oDebugImgBYTE3,displayH);
    cv::hconcat(displayH,oOutputImgBYTE3,displayH);
    cv::imshow(m_sDisplayName,displayH);
    m_oLastDisplaySize = oCurrDisplaySize;
}
Example #8
void imshowAllChannels(const std::string winname, cv::Mat img)
{
	std::vector<cv::Mat> imgs;
	cv::split(img, imgs);


	for (int i = 0; i < img.channels(); i++)
	{
		std::stringstream ss;
		ss << winname << ":" << i;
		cv::imshow(ss.str(), imgs[i]);
	}
}
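A hypothetical call site ("input.png" is a placeholder path):

cv::Mat img = cv::imread("input.png");
if (!img.empty()) {
    imshowAllChannels("input", img);  // opens windows "input:0", "input:1", ...
    cv::waitKey(0);
}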
QImage QNode::cvtCvMat2QImage(const cv::Mat & image)
{
        QImage qtemp;
        if(!image.empty() && image.depth() == CV_8U)
        {
                const unsigned char * data = image.data;
                qtemp = QImage(image.cols, image.rows, QImage::Format_RGB32);
                for(int y = 0; y < image.rows; ++y, data += image.cols*image.elemSize())
                {
                        for(int x = 0; x < image.cols; ++x)
                        {
                                QRgb * p = ((QRgb*)qtemp.scanLine (y)) + x;
                                // Assumes a BGR(A) layout with at least 3 channels.
                                *p = qRgb(data[x * image.channels()+2], data[x * image.channels()+1], data[x * image.channels()]);
                        }
                }
        }
        else if(!image.empty() && image.depth() != CV_8U)
        {
                printf("Wrong image format, must be 8_bits\n");
        }
        return qtemp;
}
Example #10
void drwnLBPFilterBank::filter(const cv::Mat& img, std::vector<cv::Mat>& response) const
{
    // check input
    DRWN_ASSERT(img.data != NULL);
    if (response.empty()) {
        response.resize(this->numFilters());
    }
    DRWN_ASSERT(response.size() == this->numFilters());

    if (img.channels() != 1) {
        cv::Mat tmp(img.rows, img.cols, img.depth());
        cv::cvtColor(img, tmp, CV_RGB2GRAY);
        return filter(tmp, response);
    }
    DRWN_ASSERT_MSG(img.depth() == CV_8U, "image must be 8-bit");

    // allocate output channels as 32-bit floating point
    for (unsigned i = 0; i < response.size(); i++) {
        if ((response[i].rows == img.rows) && (response[i].cols == img.cols) &&
            (response[i].depth() == CV_32F) && (response[i].channels() == 1)) {
            response[i].setTo(0.0f);
        } else {
            response[i] = cv::Mat::zeros(img.rows, img.cols, CV_32FC1);
        }
    }

    for (int y = 0; y < img.rows; y++) {

        const unsigned char *p = img.ptr<const unsigned char>(y);
        const unsigned char *p_prev = (y == 0) ? p : img.ptr<const unsigned char>(y - 1);
        const unsigned char *p_next = (y == img.rows - 1) ? p : img.ptr<const unsigned char>(y + 1);

        // 4-connected neighbourhood
        for (int x = 0; x < img.cols; x++) {
            if (p[x] > p_prev[x]) response[0].at<float>(y, x) = 1.0f;
            if ((x < img.cols - 1) && (p[x] > p[x + 1])) response[1].at<float>(y, x) = 1.0f;
            if (p[x] > p_next[x]) response[2].at<float>(y, x) = 1.0f;
            if ((x > 0) && (p[x] > p[x - 1])) response[3].at<float>(y, x) = 1.0f;
        }

        // 8-connected neighbourhood
        if (_b8Neighbourhood) {
            for (int x = 0; x < img.cols; x++) {
                if ((p[x] > p_prev[x]) && (x < img.cols - 1) && (p[x] > p[x + 1])) response[4].at<float>(y, x) = 1.0f;
                if ((x < img.cols - 1) && (p[x] > p[x + 1]) && (p[x] > p_next[x])) response[5].at<float>(y, x) = 1.0f;
                if ((p[x] > p_next[x]) && (x > 0) && (p[x] > p[x - 1])) response[6].at<float>(y, x) = 1.0f;
                if ((x > 0) && (p[x] > p[x - 1]) && (p[x] > p_prev[x])) response[7].at<float>(y, x) = 1.0f;
            }
        }
    }
}
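The filter above keeps each directional comparison in its own response plane. For comparison, the classic LBP formulation packs all eight neighbour comparisons into one byte per pixel; a self-contained sketch (not part of the drwn library):

cv::Mat lbp8(const cv::Mat& gray)
{
    CV_Assert(gray.type() == CV_8UC1);
    cv::Mat code = cv::Mat::zeros(gray.size(), CV_8UC1);
    for (int y = 1; y < gray.rows - 1; y++) {
        for (int x = 1; x < gray.cols - 1; x++) {
            const uchar c = gray.at<uchar>(y, x);
            uchar v = 0;  // one bit per neighbour, clockwise from top-left
            v |= (gray.at<uchar>(y - 1, x - 1) > c) << 7;
            v |= (gray.at<uchar>(y - 1, x    ) > c) << 6;
            v |= (gray.at<uchar>(y - 1, x + 1) > c) << 5;
            v |= (gray.at<uchar>(y,     x + 1) > c) << 4;
            v |= (gray.at<uchar>(y + 1, x + 1) > c) << 3;
            v |= (gray.at<uchar>(y + 1, x    ) > c) << 2;
            v |= (gray.at<uchar>(y + 1, x - 1) > c) << 1;
            v |= (gray.at<uchar>(y,     x - 1) > c) << 0;
            code.at<uchar>(y, x) = v;
        }
    }
    return code;
}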
template <typename T>
cv::Mat
Auvsi_Recognize::doClustering( cv::Mat input, int numClusters, bool colored = true )
{
	#ifdef TWO_CHANNEL
		typedef cv::Vec<T, 2> VT;
	#else
		typedef cv::Vec<T, 3> VT;
	#endif
	
	typedef cv::Vec<int, 1> IT;
	
	const int NUMBER_OF_ATTEMPTS = 5;
	int inputSize = input.rows*input.cols;

	// Create destination image
	cv::Mat retVal( input.size(), input.type() );

	// Format input to k-means
	cv::Mat kMeansData( input );
	kMeansData = kMeansData.reshape( input.channels(), inputSize );

	// For the output of k-means
	cv::Mat labels( inputSize, 1, CV_32S );
	cv::Mat centers( numClusters, 1, input.type() );

	// Perform the actual k-means clustering
	// POSSIBLE FLAGS: KMEANS_PP_CENTERS KMEANS_RANDOM_CENTERS
	cv::kmeans( kMeansData, numClusters, labels, cv::TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ), NUMBER_OF_ATTEMPTS, cv::KMEANS_RANDOM_CENTERS, &centers );

	// Label the image according to the clustering results
	cv::MatIterator_<VT> retIterator = retVal.begin<VT>();
	cv::MatIterator_<VT> retEnd = retVal.end<VT>();

	cv::MatIterator_<IT> labelIterator = labels.begin<IT>();

	for( ; retIterator != retEnd; ++retIterator, ++labelIterator )
	{
		VT data = centers.at<VT>( cv::saturate_cast<int>((*labelIterator)[0]), 0);

		#ifdef TWO_CHANNEL
			*retIterator = VT( cv::saturate_cast<T>(data[0]), cv::saturate_cast<T>(data[1]) );//, cv::saturate_cast<T>(data[2]) );
		#else
			*retIterator = VT( cv::saturate_cast<T>(data[0]), cv::saturate_cast<T>(data[1]), cv::saturate_cast<T>(data[2]) );
		#endif
	}

	if( colored )
		return retVal;
	else
		return labels;
}
Example #12
void SkinSplit::cvThresholdOtsu(cv::Mat& src, cv::Mat& dst)
{
    assert(src.channels() == 1);
    int hist[256] = {0};
    double pro_hist[256] = {0.0};

    int height =  src.rows;
    int width = src.cols;
    //Count the number of pixels at each gray level.
    for(int i=0;i<width;i++)
    {
        for(int j=0;j<height;j++)
        {
            int temp = src.at<uchar>(j,i); // at(row, col) = at(y, x)
            hist[temp]++;
        }
    }

    //Probability of each gray level over the whole image.
    for(int i=0;i<256;i++)
    {
        pro_hist[i] = (double)hist[i]/(double)(width*height);
    }

    //Mean gray level of the image.
    double avgPixel = 0.0;
    for(int i=0;i<256;i++)
    {
        avgPixel += i*pro_hist[i];
    }

    int threshold=0;
    double maxVariance = 0;
    double w=0, u=0; //cumulative probability and cumulative first moment
    for(int i=0;i<256;i++)
    {
        w+=pro_hist[i];
        u+=i*pro_hist[i];

        double t = avgPixel*w -u;
        double variance = t*t/(w*(1-w));
        if(variance>maxVariance)
        {
            maxVariance = variance;
            threshold = i;
        }
    }

    cv::threshold(src,dst,threshold,255,CV_THRESH_BINARY);

}
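For reference, OpenCV's threshold() can compute the Otsu threshold itself, so the hand-rolled histogram above can be replaced by a single call:

// The threshold argument (0 here) is ignored in THRESH_OTSU mode; the
// function returns the threshold it computed from the histogram.
double otsu = cv::threshold(src, dst, 0, 255,
                            cv::THRESH_BINARY | cv::THRESH_OTSU);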
void GUI::set_img(const cv::Mat &frame, cv::Mat &result, Controller &controller) {

    /** result == controller.current_image_to_display. */

    // Convert controller.current_image_to_process (as src) to RGB(A) controller.current_image_to_display (as dst).
    convert_image_to_gui_output_format(frame, result) ;


    if (widget_current_image_to_display != NULL) {

        delete widget_current_image_to_display ;
    }

    widget_current_image_to_display = Gtk::manage(new Gtk::Image());

    widget_current_image_to_display->clear() ;

    if (result.depth() != CV_8U) { // This shouldn't happen!

        result.assignTo(result, CV_8U) ;

    }

    // We resize the original image every time we display it.
    // This is better because the image is resized (if needed) only once per change,
    // rather than repeatedly resizing the same image.

    //controller.resize_image_to_display(result) ;

    if (controller.get_image_size_gt_layout()) {
        cv::resize(result, result, cv::Size(controller.display_image_size.first, controller.display_image_size.second), 1.0, 1.0, cv::INTER_LINEAR) ;
    }


    IplImage iplimg = _IplImage(result) ;

    widget_current_image_to_display->set(Gdk::Pixbuf::create_from_data( (const guint8 *) iplimg.imageData,
                                         Gdk::COLORSPACE_RGB,
                                         (result.channels() == 4),
                                         iplimg.depth,
                                         iplimg.width,
                                         iplimg.height,
                                         iplimg.widthStep)) ;



    widget_current_image_to_display->show() ;

    display_area.put(*widget_current_image_to_display, controller.current_image_position.first, controller.current_image_position.second) ;

}
Example #14
    cv::Mat CatOP::execute_current(const cv::Mat& img,
                                    const vector<string>& fields) {
        string key = get_key(fields);
        cout << key << " " << img.cols << " " << img.rows
             << " " << img.channels() << endl;

        std::size_t last_dot = key.rfind(".");
        string ext;
        if (last_dot != string::npos) {
            ext = key.substr(last_dot);
        }

        vector<unsigned char> img_content;
        cv::imencode(ext, img, img_content);
        string base64_string = base64_encode(img_content.data(), img_content.size());

        char const* term = std::getenv("TERM");
        char const* tmux = std::getenv("TMUX");
        bool in_tmux = false;
        if (term != NULL && tmux != NULL) {
            if (strncmp(term, "xterm", 5) == 0
                || strncmp(term, "screen", 6) == 0) {
                if (tmux[0] > 0) {
                    in_tmux = true;
                }
            }
        }

        if (in_tmux) {
            std::cout << "\033Ptmux;\033\033]";
        } else {
            std::cout << "\033]";
        }

        const unsigned char * key_ptr =
            reinterpret_cast<const unsigned char*>(key.data());
        std::cout << "1337;File=name="
                  << base64_encode(key_ptr, key.length())
                  << ";size=" << img_content.size()
                  << ";inline=1:" << base64_string;

        if (in_tmux) {
            std::cout << "\a\033\\";
        } else {
            std::cout << "\a";
        }

        std::cout << std::endl;

        return img;
    }
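The escape sequences implement iTerm2's inline-images protocol; inside tmux the payload has to be wrapped in a DCS passthrough, which is what the "\033Ptmux;" prefix and "\033\\" suffix do. A stripped-down sketch of the same output step, reusing the base64_encode helper assumed above and omitting the tmux handling:

void show_inline(const std::vector<unsigned char>& img_content) {
    std::string b64 = base64_encode(img_content.data(), img_content.size());
    // OSC 1337: iTerm2 inline image, terminated by BEL.
    std::cout << "\033]1337;File=size=" << img_content.size()
              << ";inline=1:" << b64 << "\a" << std::endl;
}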
Example #15
	float FuyangCalibra(const cv::Mat& in)
	{
		if(in.empty()||in.channels()!=1)
			return 0;
		float r=in.rows;
		float c=in.cols;
		// Find the first and last non-zero rows of the mask.
		float start_y=r-1;
		float end_y=0;
		for(int i=0;i<r;i++)
		{
			const uchar* dataI=in.ptr<uchar>(i);
			for(int j=0;j<c;j++)
			{
				if(dataI[j]==0)
					continue;
				if(start_y>i)
					start_y=i;
				if(end_y<i)
					end_y=i;
			}
		}

		// Convert the row indices into offsets from the image centre line.
		start_y=(r-1)/2- start_y;
		end_y=(r-1)/2- end_y;
		float A=2*start_y*tan(CHUIZHI)/r;
		float B=2*end_y*tan(CHUIZHI)/r;
		float C=float(DELTA_Y)/float(HEIGHT);
		ROS_INFO("A=  %f  ,B= %f  C=%f ",A,B,C);
		// Solve _a*x^2 + _b*x + _c = 0 for x = tan(pitch angle).
		float _a=A-B-A*B*C;
		float _b=A*C+B*C;
		float _c=A-B-C;
		float x1=(-_b+sqrt(_b*_b-4*_a*_c))/(2*_a);
		float x2=(-_b-sqrt(_b*_b-4*_a*_c))/(2*_a);
		float arcx=atan(x1)*180/CV_PI;
		ROS_INFO("a=  %f  ,b= %f   ,c=   %f, ",_a,_b,_c);
		ROS_INFO("start_y=  %f  ,end_y= %f   ,x1=   %f,,x2=   %f ",start_y,end_y,x1,x2);
		ROS_INFO("x=   %f,arcx=   %f ",atan(x1),arcx);
		return arcx;
	};///getPointset(); TODO: record points by class or remove isolated regions
bool PicOpt::Optimize9Patch::OptimizeOneDirection(bool is_horizontal,
	const cv::Mat &img,
	const cv::Vec2i &patch,
	cv::Mat &new_img,
	cv::Vec2i &new_patch)
{
	int end_pos = is_horizontal ? img.cols : img.rows;
	int border_sum = patch[1] + patch[0];
	int target_len = end_pos - border_sum;
	if (target_len < 2)
	{
		return false;
	}

	Mat premulti_img = (img.channels() == 4) ? Utility::AlphaBlendWithGray(img) : img;
	Mat img_gray;
	cvtColor(premulti_img, img_gray, COLOR_RGB2GRAY);

	int x_order = is_horizontal ? 1 : 0;
	int y_order = !is_horizontal ? 1 : 0;
	Mat gradient;
	Scharr(img_gray, gradient, CV_16S, x_order, y_order);

	cv::Rect resize_rect(Point(), gradient.size());
	if (is_horizontal)
	{
		resize_rect.x = patch[0];
		resize_rect.width = target_len;
	}
	else
	{
		resize_rect.y = patch[0];
		resize_rect.height = target_len;
	}

	Mat abs_grad;
	convertScaleAbs(gradient, abs_grad);
	const Mat &grad_center = abs_grad(resize_rect);

	uint32_t output_quality = NinePatchConfig::GetInstance().GetOutputQuality();
	if (target_len == 2)
	{   // for small picture
		if (!NeedOptSmallSize(grad_center, is_horizontal))
		{
			return false;
		}
	}

	new_img = ResizeImageRect(img, resize_rect, is_horizontal, new_patch);
	return true;
}
Example #17
void SegmentationImpl::saveImage(std::string filename, cv::Mat image, bool normalize, bool equalize) const
{
    if (image.dims == 3) {
        image = flattenMultiRegionImage(image);
    }
    if (image.channels() == 1) {
        if (normalize) {
            cv::Mat tmp;
            cv::normalize(image, tmp, 0, 255, cv::NORM_MINMAX, CV_8UC1);
            image = tmp;
        } else {
            switch (image.type()) {
            case CV_32FC1:
                image.convertTo(image, CV_8UC1, 255, 0);
                break;
            case CV_8UC1:
            case CV_16UC1:
            default:
                break;
            }
        }
        if (equalize) {
            cv::Mat tmp;
            cv::equalizeHist(image, tmp);
            image = tmp;
        }
    } else if (image.channels() == 3) {
        if (image.type() == CV_32FC3) {
            image.convertTo(image, CV_8UC3, 255.0f, 0);
        }
    }
    std::string type = matTypeToStr(image.type());
    if (cv::imwrite(filename, image)) {
        LINFOF("Saved image '%s' (%s %dx%d)", filename.c_str(), type.c_str(), image.cols, image.rows);
    } else {
        LINFOF("Failed to save image '%s' (%s %dx%d)", filename.c_str(), type.c_str(), image.cols, image.rows);
    }
}
Example #18
/* Converts the image to black/white, crops it, resizes it */
void NumberClassifier::pre_process(cv::Mat& img) {
    cv::Mat img_gray = img;
    if (img.channels() > 1)
        cv::cvtColor(img, img_gray, CV_BGR2GRAY);
    std::vector<cv::Mat> channels;
    split(img, channels);
    // Note: the mean is taken over channel 0 of the original image
    // (the blue plane for BGR input), not over img_gray.
    int mean = (int)cv::mean(channels[0])[0];
    int thresh = (*t_func)(mean);
    cv::Mat img_bin;
    cv::threshold(img_gray, img_bin, thresh, 255, cv::THRESH_BINARY);
    img = img_bin;
    crop_img(img, img_w, img_h);
    cv::resize(img, img, cv::Size(img_w,img_h), 0, 0, cv::INTER_AREA);
}
Example #19
cv::Vec3b calcAvgColor(vector<cv::Point>& contour, cv::Mat& colorImg){
	if(colorImg.channels() == 1 || contour.empty()){
		cout<<"calcAvgColor(): input img must be a 3-channel image and the contour must be non-empty!!"<<endl;
		return cv::Vec3b(0,0,0);
	}

	int sum_b=0,sum_g=0,sum_r=0;
	for(size_t i=0;i<contour.size();i++){
		// cv::Mat::at takes (row, col), i.e. (y, x).
		sum_b += colorImg.at<cv::Vec3b>(contour[i].y,contour[i].x)[0];
		sum_g += colorImg.at<cv::Vec3b>(contour[i].y,contour[i].x)[1];
		sum_r += colorImg.at<cv::Vec3b>(contour[i].y,contour[i].x)[2];
	}
	return cv::Vec3b(sum_b/contour.size(),sum_g/contour.size(),sum_r/contour.size());
}
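Note that this averages only the pixels lying on the contour boundary. A hedged alternative (calcAvgColorFilled is a hypothetical helper) averages over the filled region using a mask and cv::mean:

cv::Vec3b calcAvgColorFilled(const std::vector<cv::Point>& contour, const cv::Mat& colorImg)
{
	cv::Mat mask = cv::Mat::zeros(colorImg.size(), CV_8UC1);
	std::vector<std::vector<cv::Point> > contours(1, contour);
	cv::drawContours(mask, contours, 0, cv::Scalar(255), -1);  // thickness -1 fills the contour
	cv::Scalar avg = cv::mean(colorImg, mask);                 // per-channel BGR means
	return cv::Vec3b((uchar)avg[0], (uchar)avg[1], (uchar)avg[2]);
}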
Example #20
/**
 * @brief ImageHandler::setImage
 * @param qLabel
 * @param image
 */
void ImageHandler::setImage(QLabel& qLabel, cv::Mat image, QImage::Format imageFormat){
    cv::Mat convertIm;
    //convert to RGB for QT labels
    if(image.channels() > 1)
        cv::cvtColor(image, convertIm, CV_BGR2RGB);
    else
        convertIm = image;
    //refit the Mat to fit inside the QLabel
    convertIm = refit(qLabel.size(), convertIm);
    QImage im = QImage((const unsigned char*) (convertIm.data),
                       convertIm.cols, convertIm.rows,convertIm.step, imageFormat);
    qLabel.setPixmap(QPixmap::fromImage(im));
    qLabel.resize( qLabel.pixmap()->size());
}
Example #21
void MatCache::set_cv_mat(const cv::Mat &image)
{
	invalidate();

	af_assert(image.depth() == CV_8U);
	if (image.channels() == 1)
		update_image("gray_8u", image);
	else if (image.channels() == 3)
	{
		cv::Mat mat;
		cv::cvtColor(image, mat, CV_BGR2RGB);
		update_image("rgb_8u", mat);
	}
	else if (image.channels() == 4)
	{
		cv::Mat mat;
		cv::cvtColor(image, mat, CV_BGRA2RGB);
		update_image("rgb_8u", mat);
	}
	else
		aifil::except(false, "MatCache: incorrect input channels num: " +
			   std::to_string(image.channels()));
}
void HoughDetectEdge::detect(cv::Mat &image, std::vector<cv::Vec4i> &edges) {
    edges.clear();
    cv::Mat gray;
    if (image.channels() == 3) {
        cv::cvtColor(image, gray, CV_BGR2GRAY);
    } else {
        gray = image.clone();
    }
    canndyImage(gray);
    std::vector<std::vector<cv::Vec4i>> lines;
    houghLines(gray, lines);
    if (lines.size() != 4) return;
    mergeLines(lines, edges);
}
Example #23
	void operator()(const cv::Mat& image,bool record)
	{
		// no image no fun
		if (image.empty()) return;

		int cF = currentFrame ? 1 : 0;
		int pF = currentFrame ? 0 : 1;

		// resize the image
		cv::resize(image,frames[cF],cv::Size(image.cols*scale,image.rows*scale),0,0,cv::INTER_LINEAR);

		std::cout << "Current: " << cF << " Previous: " << pF << std::endl;
		std::cout << "Image " << image.size() << " c:" << image.channels() << " > " << frames[cF].size() << std::endl;

		if (record && !output.isOpened())
		{
			// fourcc
			int fourCC = FourCC<'X','V','I','D'>::value;
			// record at 25 fps; grayscale output (last argument false)
			if (output.open("flow.avi",fourCC,25,frames[cF].size(),false))
				{
					std::cout << "capture file opened" << std::endl;
				}		
		}

		// make a copy for the initial frame
		if (frames[pF].empty())
			frames[cF].copyTo(frames[pF]);

		if (!flow.empty())
			flow = cv::Mat::zeros(flow.size(),flow.type());

		// calculate dense optical flow
		cv::calcOpticalFlowFarneback(frames[pF],frames[cF],flow,.5,2,8,3,7,1.5,0);

		// we can't draw into the frame!
		cv::Mat outImg = frames[cF].clone();

		drawOptFlowMap(flow,outImg,8,cv::Scalar::all(255));

		cv::imshow("Flow",outImg);

		// flip the buffers
		currentFrame = !currentFrame;
		
		// record the frame
		if (record && output.isOpened())
			output.write(outImg);
		
	}
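drawOptFlowMap is not shown in the snippet; a sketch modeled on OpenCV's fback.cpp sample, which the member used here presumably resembles:

static void drawOptFlowMap(const cv::Mat& flow, cv::Mat& cflowmap,
                           int step, const cv::Scalar& color)
{
	// Draw one motion vector per step-by-step grid cell.
	for (int y = 0; y < cflowmap.rows; y += step)
		for (int x = 0; x < cflowmap.cols; x += step)
		{
			const cv::Point2f& fxy = flow.at<cv::Point2f>(y, x);
			cv::line(cflowmap, cv::Point(x, y),
			         cv::Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), color);
			cv::circle(cflowmap, cv::Point(x, y), 2, color, -1);
		}
}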
Example #24
void Util::generateRgbFeatureFromImage(const cv::Mat& sourceImage, cv::Mat& features){
    assert(sourceImage.channels() == 3);

    features = cv::Mat(sourceImage.rows*sourceImage.cols, 3, CV_32F);

    for(int offset_y = 0; offset_y < sourceImage.rows; offset_y++){
        for(int offset_x = 0; offset_x < sourceImage.cols; offset_x++){
            features.at<float>(offset_y*sourceImage.cols+offset_x, 0) = (float)sourceImage.at<cv::Vec3b>(offset_y,offset_x)[2];
            features.at<float>(offset_y*sourceImage.cols+offset_x, 1) = (float)sourceImage.at<cv::Vec3b>(offset_y,offset_x)[1];
            features.at<float>(offset_y*sourceImage.cols+offset_x, 2) = (float)sourceImage.at<cv::Vec3b>(offset_y,offset_x)[0];
        }
    }
    return;
}
void cv::ocl::oclMat::download(cv::Mat &m) const
{
    CV_DbgAssert(!this->empty());
    m.create(wholerows, wholecols, type());

    if(m.channels() == 3)
    {
        int pitch = wholecols * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
                                     (pitch * wholerows + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
        openCLVerifyCall(err);

        convert_C4C3(*this, temp);
        openCLMemcpy2D(clCxt, m.data, m.step, temp, pitch, wholecols * m.elemSize(), wholerows, clMemcpyDeviceToHost, 3);
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
    {
        openCLMemcpy2D(clCxt, m.data, m.step, data, step, wholecols * elemSize(), wholerows, clMemcpyDeviceToHost);
    }
    Size wholesize;
    Point ofs;
    locateROI(wholesize, ofs);
    m.adjustROI(-ofs.y, ofs.y + rows - wholerows, -ofs.x, ofs.x + cols - wholecols);
}
cv::Mat ossimOpenCvDisparityMapGenerator::execute(cv::Mat master_mat, cv::Mat slave_mat)
{
	cout << "DISPARITY MAP GENERATION..." << endl;
	// Disparity Map generation
	int ndisparities = 16; //maximum disparity minus minimum disparity (with conversion factor 1 use 16*2*2)
	int SADWindowSize = 11;   //Matched block size

	cv::StereoSGBM sgbm;

	sgbm.preFilterCap = 63;
	sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;

	int cn = master_mat.channels();

	sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
	sgbm.P2 = 40*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
	sgbm.minDisparity = -8; // minimum possible disparity value (with conversion factor 1 use -16*2)
	sgbm.numberOfDisparities = ndisparities;
	sgbm.uniquenessRatio = 5;
	sgbm.speckleWindowSize = 100;
	sgbm.speckleRange = 1;
	sgbm.disp12MaxDiff = 1; // Maximum allowed difference (in integer pixel units) in the left-right disparity check
	//sgbm.fullDP = true;
	
	double minVal, maxVal;
	cv::Mat array_disp;
	cv::Mat array_disp_8U;   
	sgbm(master_mat, slave_mat, array_disp);
	minMaxLoc( array_disp, &minVal, &maxVal );
	array_disp.convertTo( array_disp_8U, CV_8UC1, 255/(maxVal - minVal), -minVal*255/(maxVal - minVal));   
    cout << "min\t" << minVal << "max\t" << maxVal << endl;
	cv::namedWindow( "SGM Disparity", CV_WINDOW_NORMAL );
	cv::imshow( "SGM Disparity", array_disp_8U);
	cv::imwrite( "SGM Disparity.tif", array_disp_8U);
	
	cv::waitKey(0);


	//Create and write the log file
	ofstream disparity;
	disparity.open ("DSM_parameters_disparity.txt");
	disparity <<"DISPARITY RANGE:" << " " << ndisparities << endl;
	disparity <<"SAD WINDOW SIZE:" << " " << SADWindowSize<< endl;
	disparity << "MINIMUM DISPARITY VALUE:"<< sgbm.minDisparity << endl;
	disparity.close();	


	
	return array_disp;
}
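cv::StereoSGBM as a stack object with public fields is the OpenCV 2.x API. A sketch of the equivalent setup on OpenCV 3.x+, keeping the same parameter values:

cv::Ptr<cv::StereoSGBM> sgbm3 = cv::StereoSGBM::create(
	-8 /*minDisparity*/, 16 /*numDisparities*/, 11 /*blockSize*/);
sgbm3->setPreFilterCap(63);
sgbm3->setP1(8 * cn * 11 * 11);
sgbm3->setP2(40 * cn * 11 * 11);
sgbm3->setUniquenessRatio(5);
sgbm3->setSpeckleWindowSize(100);
sgbm3->setSpeckleRange(1);
sgbm3->setDisp12MaxDiff(1);
sgbm3->compute(master_mat, slave_mat, array_disp);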
Example #27
/***************************************************************************************************
Methods
Author: Alexander Gómez Villa - Sebastian Guzman Obando - German Diez Valencia
Description: Reduces the colour resolution of the image by quantizing every
channel value to steps of div, mapping each value to the centre of its bin.
***************************************************************************************************/
void Simage::colorReduce(cv::Mat &image, int div)
{
    int nl=image.rows; //number of rows
    int nc=image.cols*image.channels(); //total number of values per row = cols * number of channels

    for(int j=0; j<nl ; j++)
    {
        uchar* data= image.ptr<uchar>(j);
        for(int i=0 ; i<nc ; i++)
        {
            data[i]= data[i]/div*div + div/2;
        }
    }
}
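The same quantization can be expressed with a 256-entry lookup table, which cv::LUT applies to every channel; a short equivalent sketch:

cv::Mat lut(1, 256, CV_8U);
for (int i = 0; i < 256; i++)
    lut.at<uchar>(i) = (uchar)(i / div * div + div / 2);
cv::LUT(image, lut, image);  // applies the same table to all channels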
Example #28
void Simage::salt(cv::Mat &image, int n)
{
    for(int k=0; k<n ; k++)
    {
        //rand() is the MFC random number generator
        //try qrand with Qt
        int i= rand()%image.cols;
        int j= rand()%image.rows;

        if(image.channels()==1)
        {
            //if the image is gray-level
            image.at<uchar>(j,i)=255;
        }
        else if(image.channels()==3)
        {
            //if there are 3 channels (color image)
             image.at<cv::Vec3b>(j,i)[0]=255;
             image.at<cv::Vec3b>(j,i)[1]=255;
             image.at<cv::Vec3b>(j,i)[2]=255;
        }
    }
}
Example #29
cv::Mat CannyEdges::processImage(cv::Mat image) const{
    double up = getUpThreshold();
    double low = getLowThreshold();
    int aperture = getApertureSize();
    bool L2 = getUseL2();

    if (image.channels() == 3){
        cv::cvtColor(image,image,cv::COLOR_RGB2GRAY);
    }

    cv::Canny(image,image,low,up,aperture,L2); // threshold order is interchangeable: OpenCV uses the smaller value for edge linking, the larger for strong edges

    return image;
}
Example #30
double mylib::WeightedSum(const cv::Mat& A, const cv::Mat& W)
{
	if (A.size() != W.size())
		exit(0);
	double result=0.0;
	cv::Scalar s;
	// Replicate the single-channel weight map so it matches A's channel count.
	vector<Mat> Wv;
	Mat W3;
	for (int i=0; i< A.channels(); i++)
	{
		Wv.push_back(W);
	}
	cv::merge(Wv,W3);
	W3 = A.mul(W3);
	s = cv::sum(W3);
	for (int i=0; i<A.channels(); i++)
		result += s[i];
	return result / A.channels();
}