Example #1
File: draw.cpp Project: RebUT/REBUT
static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
                                         const Mat& img2, const vector<KeyPoint>& keypoints2,
                                         Mat& outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, int flags )
{
    Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
    {
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( CV_StsBadSize, "outImg is smaller than the size needed to draw img1 and img2 side by side" );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
    }
    else
    {
        outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );

        if( img1.type() == CV_8U )
            cvtColor( img1, outImg1, CV_GRAY2BGR );
        else
            img1.copyTo( outImg1 );

        if( img2.type() == CV_8U )
            cvtColor( img2, outImg2, CV_GRAY2BGR );
        else
            img2.copyTo( outImg2 );
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        drawKeypoints( outImg1, keypoints1, outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
        drawKeypoints( outImg2, keypoints2, outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
void PatchGenerator::operator ()(const Mat& image, const Mat& T,
                                 Mat& patch, Size patchSize, RNG& rng) const
{
    patch.create( patchSize, image.type() );
    if( backgroundMin != backgroundMax )
    {
        rng.fill(patch, RNG::UNIFORM, Scalar::all(backgroundMin), Scalar::all(backgroundMax));
        warpAffine(image, patch, T, patchSize, INTER_LINEAR, BORDER_TRANSPARENT);
    }
    else
        warpAffine(image, patch, T, patchSize, INTER_LINEAR, BORDER_CONSTANT, Scalar::all(backgroundMin));

    int ksize = randomBlur ? (unsigned)rng % 9 - 5 : 0; // random value in [-5, 3]; blur is only applied when > 0
    if( ksize > 0 )
    {
        ksize = ksize*2 + 1;
        GaussianBlur(patch, patch, Size(ksize, ksize), 0, 0);
    }

    if( noiseRange > 0 )
    {
        AutoBuffer<uchar> _noiseBuf( patchSize.width*patchSize.height*image.elemSize() );
        Mat noise(patchSize, image.type(), (uchar*)_noiseBuf);
        int delta = image.depth() == CV_8U ? 128 : image.depth() == CV_16U ? 32768 : 0;
        rng.fill(noise, RNG::NORMAL, Scalar::all(delta), Scalar::all(noiseRange));
        if( backgroundMin != backgroundMax )
            addWeighted(patch, 1, noise, 1, -delta, patch);
        else
        {
            for( int i = 0; i < patchSize.height; i++ )
            {
                uchar* prow = patch.ptr<uchar>(i);
                const uchar* nrow =  noise.ptr<uchar>(i);
                for( int j = 0; j < patchSize.width; j++ )
                    if( prow[j] != backgroundMin )
                        prow[j] = saturate_cast<uchar>(prow[j] + nrow[j] - delta);
            }
        }
    }
}
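A minimal usage sketch for the operator above (an assumption: OpenCV 2.x's legacy PatchGenerator with its default constructor; the affine transform here is only an example):

// Hypothetical usage of the PatchGenerator::operator() defined above
Mat image = imread("input.png", 0);               // grayscale source (CV_8UC1)
Mat T = getRotationMatrix2D(Point2f(image.cols * 0.5f, image.rows * 0.5f), 15.0, 1.0);
Mat patch;
PatchGenerator gen;                               // assumed default-constructible
gen(image, T, patch, Size(64, 64), theRNG());     // warp + optional blur/noise as coded above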
Example #3
int main(int argc, char *argv[])
{
    float alpha = 0;
    int   beta  = 0;
    Mat   image;
    Mat   result;

    if (argc<2) {
        cout<<"Usage:./contrast [image_name]"<<endl;
        return -1;
    }

    image = imread(argv[1]);
    if (!image.data) {
        cout<<"Read image error."<<endl;
        return -1;
    }

    cout<<"Enter alpha value[1-3]:"<<endl;
    cin>>alpha;
    cout<<"Enter beta value[0-100]:"<<endl;
    cin>>beta;    

    result.create(image.size(), image.type()); 

    int n = image.rows * image.cols * image.channels();
    uchar *p = image.data;
    uchar *q = result.data;
    for (int i=0; i<n; i++) {
        *q++ = saturate_cast<uchar>(alpha * (*p++) + beta);     
    }

    namedWindow("origin", CV_WINDOW_AUTOSIZE);
    imshow("origin", image);
    namedWindow("result", CV_WINDOW_AUTOSIZE);
    imshow("result", result);

    waitKey(0);

    return 0;
}
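For reference, the same saturating linear transform is available as a one-liner via Mat::convertTo, which also handles non-continuous matrices (image, alpha, beta as in the example above):

Mat result2;
image.convertTo(result2, -1, alpha, beta); // dst = saturate_cast<uchar>(alpha*src + beta); -1 keeps the source type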
Example #4
void pow_fmath(const Mat& src, const float a, Mat & dest)
{
    if (dest.empty())dest.create(src.size(), CV_32F);

    // the loops below treat the matrix as one continuous float buffer
    int size = src.size().area();
    int i = 0;

    const float* s = src.ptr<float>(0);
    float* d = dest.ptr<float>(0);
    const __m128 ma = _mm_set1_ps(a);
    for (i = 0; i <= size - 4; i += 4)
    {
        _mm_store_ps(d + i, _mm_pow_ps(_mm_load_ps(s + i), ma));
    }
    for (; i < size; i++)
    {
        d[i] = cv::pow(s[i], a);
    }
}
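Note that _mm_pow_ps is not a baseline SSE intrinsic; it comes from vendor math libraries such as Intel SVML (or fmath-style replacements), so the SIMD path only compiles where such a library is available. A portable fallback, under the same assumption that src is CV_32F:

// Portable alternative to the SIMD path, using OpenCV's element-wise pow
Mat dest;
cv::pow(src, a, dest); // same result as the loops above for CV_32F input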
void AdaptiveManifoldFilterN::h_filter(const Mat1f& src, Mat& dst, float sigma)
{
    CV_DbgAssert(src.depth() == CV_32F);

    const float a = exp(-sqrt(2.0f) / sigma);

    dst.create(src.size(), CV_32FC1);

    for (int y = 0; y < src.rows; ++y)
    {
        const float* src_row = src[y];
        float* dst_row = dst.ptr<float>(y);

        dst_row[0] = src_row[0];
        for (int x = 1; x < src.cols; ++x)
        {
            dst_row[x] = src_row[x] + a * (dst_row[x - 1] - src_row[x]);
        }
        for (int x = src.cols - 2; x >= 0; --x)
        {
            dst_row[x] = dst_row[x] + a * (dst_row[x + 1] - dst_row[x]);
        }
    }

    for (int y = 1; y < src.rows; ++y)
    {
        float* dst_cur_row = dst.ptr<float>(y);
        float* dst_prev_row = dst.ptr<float>(y-1);

        rf_vert_row_pass(dst_cur_row, dst_prev_row, a, src.cols);
    }
    for (int y = src.rows - 2; y >= 0; --y)
    {
        float* dst_cur_row = dst.ptr<float>(y);
        float* dst_prev_row = dst.ptr<float>(y+1);

        rf_vert_row_pass(dst_cur_row, dst_prev_row, a, src.cols);
    }
}
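rf_vert_row_pass is not shown in this excerpt; a plausible sketch, assuming it performs one step of the same recursive exponential filter between two rows (mirroring the horizontal passes above):

// Assumed semantics of the vertical pass: one recursive step across rows
static void rf_vert_row_pass(float* curRow, const float* prevRow, float a, int cols)
{
    for (int x = 0; x < cols; ++x)
        curRow[x] += a * (prevRow[x] - curRow[x]); // same recurrence as the horizontal filter
}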
Example #6
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // by default, OpenCV works in BGR, so we convert to RGB because OpenGL on Windows prefers it
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion from the image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
    }
    glutPostRedisplay();
}
void matLoad_oneChannel(string fileName, Mat &data)
{
	ifstream infileBin;
	infileBin.open(fileName, ios::binary);
	if (!infileBin.is_open())
		return; // bail out if the file could not be opened
	int row = 0;
	int col = 0;

	infileBin.read((char *)&row, sizeof(row));
	infileBin.read((char *)&col, sizeof(col));
	data.create(row, col, CV_32FC1);
	for (int h = 0; h < row; h++)
	{
		for (int w = 0; w < col; w++)
		{
			float tmp = 0;
			infileBin.read((char *)&tmp, sizeof(tmp));
			data.at<float>(h, w) = tmp;
		}
	}

	infileBin.close();
}
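A hypothetical counterpart that writes the same binary layout (row count, column count, then row-major floats), for symmetry with the loader above:

void matSave_oneChannel(string fileName, const Mat &data)
{
	ofstream outfileBin(fileName, ios::binary);
	int row = data.rows;
	int col = data.cols;
	outfileBin.write((const char *)&row, sizeof(row));
	outfileBin.write((const char *)&col, sizeof(col));
	for (int h = 0; h < row; h++)
		for (int w = 0; w < col; w++)
		{
			float tmp = data.at<float>(h, w);
			outfileBin.write((const char *)&tmp, sizeof(tmp));
		}
}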
Example #8
static void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::TermCriteria crit)
{
    if( src_roi.empty() )
        CV_Error( Error::StsBadArg, "The input image is empty" );

    if( src_roi.depth() != CV_8U || src_roi.channels() != 4 )
        CV_Error( Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dst_roi.create(src_roi.size(), src_roi.type());

    CV_Assert( (src_roi.cols == dst_roi.cols) && (src_roi.rows == dst_roi.rows) );
    CV_Assert( !(dst_roi.step & 0x3) );

    if( !(crit.type & cv::TermCriteria::MAX_ITER) )
        crit.maxCount = 5;
    int maxIter = std::min(std::max(crit.maxCount, 1), 100);
    float eps;
    if( !(crit.type & cv::TermCriteria::EPS) )
        crit.epsilon = 1.0;
    eps = (float)std::max(crit.epsilon, 0.0);

    int tab[512];
    for(int i = 0; i < 512; i++)
        tab[i] = (i - 255) * (i - 255);
    uchar *sptr = src_roi.data;
    uchar *dptr = dst_roi.data;
    int sstep = (int)src_roi.step;
    int dstep = (int)dst_roi.step;
    cv::Size size = src_roi.size();

    for(int i = 0; i < size.height; i++, sptr += sstep - (size.width << 2),
        dptr += dstep - (size.width << 2))
    {
        for(int j = 0; j < size.width; j++, sptr += 4, dptr += 4)
        {
            do_meanShift(j, i, sptr, dptr, sstep, size, sp, sr, maxIter, eps, tab);
        }
    }
}
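A minimal usage sketch (an assumption: a BGR frame padded to the 8-bit, 4-channel layout this routine requires):

Mat bgr = imread("input.png");
Mat bgra, out;
cvtColor(bgr, bgra, COLOR_BGR2BGRA);   // meanShiftFiltering_ expects CV_8UC4
meanShiftFiltering_(bgra, out, 10, 20,
                    TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1.0));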
Example #9
void FireVibe::update(const Mat &image, Mat &foreImage)
{
    CV_Assert(image.type() == imageType && image.cols == imageWidth && image.rows == imageHeight);

    foreImage.create(imageHeight, imageWidth, CV_8UC1);

    // The outermost ring of pixels is treated as background by default.
    // The foreground detection and background-model update steps of this algorithm
    // use neighborhood operations, and border pixels have incomplete neighborhoods,
    // so handling them would need extra checks. In practice the frame is large enough
    // that these border pixels do not noticeably affect the final result, so they are skipped.
    foreImage.row(0).setTo(0);
    foreImage.row(imageHeight - 1).setTo(0);
    foreImage.col(0).setTo(0);
    foreImage.col(imageWidth - 1).setTo(0);

    // process according to the actual image channel count
    if (imageChannels == 3)
        proc8UC3(image, foreImage);
    else if (imageChannels == 1)
        proc8UC1(image, foreImage);
}
Example #10
void dibujaPatron(Mat *interferograma, int dim,int width, int height, string nombre){
    Mat imagen;
    imagen.create(height,width,CV_32FC3);
    imagen.setTo(Scalar::all(0));
    for (int i=0;i<dim;i++){
        for (int x=0;x<imagen.cols;x++){
            int j=x*interferograma[i].cols/imagen.cols;
            int y=50+100*(i)+50*(get2D32F(interferograma[i],j,0,0));

            //cout<<"x: " << j*2*Xmax[i]/(interf[i].rows-1) << " y: "<<y-250 <<endl;
            set2D32F(imagen,x,y,i,1);
            set2D32F(imagen,x,100*i,i,1);
            set2D32F(imagen,x,50+100*i,i,1);
            set2D32F(imagen,x,100+100*i,i,1);
        }
        for (int y=0;y<50;y++){
            set2D32F(imagen,(int)(imagen.cols/2),50+100*i+y-25,i,1);
        }

    }
    imshow( nombre, imagen);
}
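get2D32F and set2D32F are not defined in this excerpt; hedged sketches consistent with how they are called above, assuming (mat, x, y, channel[, value]) addressing into a CV_32FC3 image:

static float get2D32F(const Mat& m, int x, int y, int c)
{
    return m.at<Vec3f>(y, x)[c];   // Mat::at takes (row, col)
}
static void set2D32F(Mat& m, int x, int y, int c, float v)
{
    m.at<Vec3f>(y, x)[c] = v;
}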
/**
  * Logic that performs the normal match_template.
  * Called from within normal(Mat)
  **/
void MatchTemplate::normal()
{
    /// Create the result matrix
    Mat result;
    int result_cols =  mOriginalImage.cols - mTemplateImg.cols + 1;
    int result_rows = mOriginalImage.rows - mTemplateImg.rows + 1;
    result.create( result_rows, result_cols, CV_32FC1 );

    /// @LuisAlonso, match template timing starts here

    mBenchmark->startTimer();
    mBenchmark->startTickCPU();

    /// Perform the template match
    matchTemplate( mOriginalImage, mTemplateImg, result, MATCH_METHOD );

    mBenchmark->markLapTimer();
    mBenchmark->markLapTickCPU();

    /// @LuisAlonso, and it ends here

    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );

    /// Localizing the best match with minMaxLoc
    double minVal; double maxVal; Point minLoc; Point maxLoc; Point matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

    /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
    if( MATCH_METHOD  == CV_TM_SQDIFF || MATCH_METHOD == CV_TM_SQDIFF_NORMED )
    { matchLoc = minLoc; }
    else
    { matchLoc = maxLoc; }

    /// Show me what you got
    rectangle( result, matchLoc, Point( matchLoc.x + mTemplateImg.cols , matchLoc.y + mTemplateImg.rows ), Scalar::all(0), 2, 8, 0 );

    /// Notify whoever is connected
    emit onMatchTemplateFinished(result);
}
Example #12
void laplacianFiltering(const Mat& input, const Mat& laplacianMask, float scale, Mat& output, Mat& scaledLaplacian){
    Mat input_tmp,tmp;
    float* input_data;
    float* output_data;
    float* scaleLap_data;
    double t = (double)getTickCount();
    Size inputSize;
    spatialFiltering(input, laplacianMask, scaledLaplacian); //get scaledLaplacian (scale not applied yet)
    // To check input data is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in laplacianFiltering!!!\n");
        exit(0);
    }
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaleLap_data = (float*)scaledLaplacian.data;
    input_data = (float*)input_tmp.data;
    output_data = (float*)output.data;
    // apply the scaled Laplacian to sharpen the image
    for(int p=0; p<inputSize.width*inputSize.height; p++){
        float buf;
        scaleLap_data[p] = scaleLap_data[p]*scale; // apply scale
        buf = input_data[p] + scaleLap_data[p];    // add to the original input
        if(buf>1)       // clamp to [0, 1]
            buf = 1;
        else if(buf<0)
            buf = 0;
        output_data[p] = buf;
    }
    }
    t = (double)getTickCount()-t;
    printf("Laplacian total consume %gms\n", t*1000/getTickFrequency());// get processing time
    return;
}
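A usage sketch for the routine above (assumptions: a standard 3x3 Laplacian mask and a hypothetical inputImg; spatialFiltering is presumed to accept any odd-sized float kernel):

// Hypothetical call with a common 4-neighbour Laplacian kernel
Mat laplacianMask = (Mat_<float>(3, 3) <<  0, -1,  0,
                                          -1,  4, -1,
                                           0, -1,  0);
Mat output, scaledLap;
laplacianFiltering(inputImg, laplacianMask, 0.5f, output, scaledLap); // inputImg: CV_8UC1 or CV_32FC1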
//-----------------------------------[ main() function ]-----------------------------------------
//		Description: entry point of the console application; the program starts here
//------------------------------------------------------------------------------------------------
int main( ) 
{
	//read video from the camera
	VideoCapture cap(0);

	//check whether the capture opened successfully
	if(!cap.isOpened()) {
		cout << "Capture could not be opened successfully" << endl;
		return -1;
	}

	namedWindow("Video");
	namedWindow("Backprojection");

	setMouseCallback("Video", on_mouse);

	while(char(waitKey(1)) != 'q' && cap.isOpened()) {
		cap >> frame;
		if(!selected) mask.create(frame.rows+2, frame.cols+2, CV_8UC1);
		// Check if the video is over
		if(frame.empty()) {
			cout << "Video over" << endl;
			break;
		}
		cvtColor(frame, frame_hsv, CV_BGR2HSV);

		// backproject on the HSV image
		Mat frame_backprojected = Mat::zeros(frame.size(), CV_8UC1);        
		if(selected) {
			int channels[] = {0, 1};
			calcBackProject(&frame_hsv, 1, channels, hist, frame_backprojected, ranges);
		}

		imshow("Video", frame);
		imshow("Backprojection", frame_backprojected);
	}

	return 0;
}
Example #14
/// <summary>
/// Converts from Kinect depth frame data into a ARGB OpenCV image matrix
/// User must pre-allocate space for matrix.
/// </summary>
/// <param name="pImage">pointer in which to return the OpenCV image matrix</param>
/// <returns>S_OK if successful, an error code otherwise</returns>
HRESULT OpenCVFrameHelper::GetDepthDataAsArgb(Mat* pImage) const
{
    DWORD depthWidth, depthHeight;
    NuiImageResolutionToSize(m_depthResolution, depthWidth, depthHeight);

    // Get the depth image
    Mat depthImage;
    depthImage.create(depthHeight, depthWidth, DEPTH_TYPE);
    HRESULT hr = GetDepthData(&depthImage);
    if (!SUCCEEDED(hr)) {
        return hr;
    }

    for (UINT y = 0; y < depthHeight; ++y)
    {
        // Get row pointers for Mats
        const USHORT* pDepthRow = depthImage.ptr<USHORT>(y);
        Vec4b* pDepthRgbRow = pImage->ptr<Vec4b>(y);

        for (UINT x = 0; x < depthWidth; ++x)
        {
            USHORT raw_depth = pDepthRow[x];

            // If depth value is valid, convert and copy it
            if (raw_depth != 65535)
            {
                UINT8 redPixel, greenPixel, bluePixel;
                DepthShortToRgb(raw_depth, &redPixel, &greenPixel, &bluePixel);
                pDepthRgbRow[x] = Vec4b(redPixel, greenPixel, bluePixel, 1);
            }
            else
            {
                pDepthRgbRow[x] = 0;
            }
        }
    }

    return S_OK;
}
Example #15
static void getGradientInterleave(const Mat& image,Mat & grad){
    //Image gradients for alignment
    //Note that these gradients are theoretically problematic for images with
    // sudden changes. It might be wise to blur the images before alignment to
    // avoid sudden changes, but that makes occlusion more problematic.
    grad.create(2,image.rows*image.cols,CV_32FC1);
    Mat gray;
    if (image.type()==CV_32FC1) {
        gray=image;
    }else {
        cvtColor(image, gray, CV_BGR2GRAY);
        gray.convertTo(gray,CV_32FC1);
    }
    Mat gradX(image.rows,image.cols,CV_32FC1);
    Scharr( gray, gradX, CV_32FC1, 1, 0, 1.0/26.0, 0, BORDER_REPLICATE );
    Mat gradY(image.rows,image.cols,CV_32FC1);
    Scharr( gray, gradY, CV_32FC1, 0, 1, 1.0/26.0, 0, BORDER_REPLICATE);
    Mat src [2]={gradY,gradX};
    merge(src,2,grad); // note: merge() reallocates grad as a rows x cols CV_32FC2 matrix
    
}
Example #16
int main() {
    // Create a VideoCapture object to read from video file
    // 0 is the ID of the built-in laptop camera, change if you want to use other camera
    VideoCapture cap(0);
    
    //check if the file was opened properly
    if(!cap.isOpened()) {
        cout << "Capture could not be opened succesfully" << endl;
        return -1;
    }

    namedWindow("Video");
    namedWindow("Backprojection");

    setMouseCallback("Video", on_mouse);

    while(char(waitKey(1)) != 'q' && cap.isOpened()) {
        cap >> frame;
        if(!selected) mask.create(frame.rows+2, frame.cols+2, CV_8UC1);
        // Check if the video is over
        if(frame.empty()) {
            cout << "Video over" << endl;
            break;
        }
        cvtColor(frame, frame_hsv, CV_BGR2HSV);

        // backproject on the HSV image
        Mat frame_backprojected = Mat::zeros(frame.size(), CV_8UC1);        
        if(selected) {
            int channels[] = {0, 1};
            calcBackProject(&frame_hsv, 1, channels, hist, frame_backprojected, ranges);
        }

        imshow("Video", frame);
        imshow("Backprojection", frame_backprojected);
    }

    return 0;
}
	Mat process(const Mat &image){

		result.create(image.rows, image.cols, CV_8U);
		Mat_<Vec3b>::const_iterator it = image.begin<Vec3b>();
		Mat_<Vec3b>::const_iterator itend = image.end<Vec3b>();
		MatIterator_<uchar> itout = result.begin<uchar>();
		for(; it != itend; ++it, ++itout){

			if (getDistance(*it) < minDist)
			{
				*itout = 255;
			} else {
				*itout = 0;
			}
		}
		return result;
	}
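getDistance, minDist, target and result are class members not shown in this excerpt; a plausible sketch of the distance helper, consistent with the loop above (city-block distance to a stored target colour):

// Assumed helper: L1 distance between a pixel and a target BGR colour
int getDistance(const Vec3b& color) const {
    return abs(color[0] - target[0])
         + abs(color[1] - target[1])
         + abs(color[2] - target[2]);
}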
Example #18
static
void cvtDepth2Cloud( const Mat& depth, Mat& cloud, const Mat& cameraMatrix )
{
    CV_Assert( cameraMatrix.type() == CV_64FC1 );
    const double inv_fx = 1.f/cameraMatrix.at<double>(0,0);
    const double inv_fy = 1.f/cameraMatrix.at<double>(1,1);
    const double ox = cameraMatrix.at<double>(0,2);
    const double oy = cameraMatrix.at<double>(1,2);
    cloud.create( depth.size(), CV_32FC3 );
    for( int y = 0; y < cloud.rows; y++ )
    {
        Point3f* cloud_ptr = reinterpret_cast<Point3f*>(cloud.ptr(y));
        const float* depth_ptr = reinterpret_cast<const float*>(depth.ptr(y));
        for( int x = 0; x < cloud.cols; x++ )
        {
            float z = depth_ptr[x];
            cloud_ptr[x].x = (float)((x - ox) * z * inv_fx);
            cloud_ptr[x].y = (float)((y - oy) * z * inv_fy);
            cloud_ptr[x].z = z;
        }
    }
}
void Threshold_Demo( int, void* )
{
	threshold( src, dst, 255, 0, 4 );
	dst = src.clone(); // note: this immediately overwrites the threshold result above
	Mat noisyI;
	noisyI.create (dst.rows,dst.cols,CV_32FC(1));
	noisyI.setTo (cv::Scalar::all (0));

	vector <Mat>  _channel;
	dst.convertTo (dst,CV_32FC(3),1.0,0);
	cv::split(dst,_channel);
	for(int i=0;i<dst.channels ();i++)
	{
		randn(noisyI, Scalar::all(0), Scalar::all((double)threshold_value)/2);
		add(_channel[i],noisyI,_channel[i]);
	}

	cv::merge (_channel,dst);
	dst.convertTo (dst,CV_8UC(3),1.0,0);

	imshow( "Add GaussianNoise", dst );
}
Example #20
int main( int, char** argv )
{
  /// Load an image
  src = imread( argv[1] );

  /// Create a matrix of the same type and size as src (for dst)
  dst.create( src.size(), src.type() );

  /// Convert the image to grayscale
  cvtColor( src, src_gray, COLOR_BGR2GRAY );

  /// Create a window
  namedWindow( window_name, WINDOW_AUTOSIZE );

  /// Create a Trackbar for user to enter threshold
  createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );

  CannyThreshold(0, 0);

  waitKey(0);
  return 0;
}
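CannyThreshold is not shown here; this example follows the standard OpenCV Canny tutorial, so the callback is plausibly the tutorial's (detected_edges is an assumed global alongside src, src_gray, dst and lowThreshold):

static void CannyThreshold(int, void*)
{
  blur( src_gray, detected_edges, Size(3,3) );                       // reduce noise first
  Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*3, 3 );
  dst = Scalar::all(0);
  src.copyTo( dst, detected_edges );                                 // show edges over black
  imshow( window_name, dst );
}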
Example #21
/*!
 *  
 */
static void vIdle()
{
  if (capture)
  {
    //capture image
    vCapturer >> inImg;
    assert(inImg.empty()==false);
    undImg.create(inImg.size(),CV_8UC3);
    //by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers it
//    cv::cvtColor(inImg,inImg,CV_BGR2RGB);
    //remove distortion in image
    cv::undistort(inImg,undImg, camParams.getCamMatrix(),camParams.getDistor());
    //detect markers
    mDetector.detect(undImg,markers,camParams.getCamMatrix(),Mat(),msiz->dval[0]);
    //Detection of the board
    board.second=bDetector.detect(markers, boardConfig,board.first, camParams,msiz->dval[0]);
    //check the speed by calculating the mean speed of all iterations
    //resize the image to the size of the GL window
    cv::resize(undImg, resImg, glSize);
  }
  glutPostRedisplay();
}
Mat GradientSumFilter::applyTo(const Mat& image, Mat& filtered) const {
	if (image.type() != CV_8UC2)
		throw invalid_argument("GradientSumFilter: the image must be of type CV_8UC2");
	if (image.rows % rows != 0)
		throw invalid_argument("GradientSumFilter: image row count (" + std::to_string(image.rows) + ") is not divisible by cell count (" + std::to_string(rows) + ")");
	if (image.cols % cols != 0)
		throw invalid_argument("GradientSumFilter: image column count (" + std::to_string(image.cols) + ") is not divisible by cell count (" + std::to_string(cols) + ")");

	filtered.create(1, rows * cols * 4, CV_32FC1);
	int cellWidth = image.cols / cols;
	int cellHeight = image.rows / rows;
	float* values = filtered.ptr<float>();
	float normalizer = 1.f / 127.f;
	for (int row = 0; row < rows; ++row) {
		for (int col = 0; col < cols; ++col) {
			float sdx = 0;
			float sdy = 0;
			float sadx = 0;
			float sady = 0;
			for (int j = 0; j < cellHeight; ++j) {
				for (int i = 0; i < cellWidth; ++i) {
					const Vec2b& gradient = image.at<Vec2b>(row * cellHeight + j, col * cellWidth + i);
					float dx = normalizer * (static_cast<int>(gradient[0]) - 127);
					float dy = normalizer * (static_cast<int>(gradient[1]) - 127);
					sdx += dx;
					sdy += dy;
					sadx += fabs(dx);
					sady += fabs(dy);
				}
			}
			values[0] = sdx;
			values[1] = sdy;
			values[2] = sadx;
			values[3] = sady;
			values += 4;
		}
	}
	return filtered;
}
Example #23
template <typename Type>
void testReduce( const Mat& src, Mat& sum, Mat& avg, Mat& max, Mat& min, int dim )
{
    assert( src.channels() == 1 );
    if( dim == 0 ) // row
    {
        sum.create( 1, src.cols, CV_64FC1 ); 
        max.create( 1, src.cols, CV_64FC1 );
        min.create( 1, src.cols, CV_64FC1 );
    }
    else
    {
        sum.create( src.rows, 1, CV_64FC1 ); 
        max.create( src.rows, 1, CV_64FC1 );
        min.create( src.rows, 1, CV_64FC1 );
    }
    sum.setTo(Scalar(0));
    max.setTo(Scalar(-DBL_MAX));
    min.setTo(Scalar(DBL_MAX));

    if( dim == 0 )
    {
        for( int ri = 0; ri < src.rows; ri++ )
        {
            for( int ci = 0; ci < src.cols; ci++ )
            {
                sum.at<double>(0, ci) += src.at<Type>(ri, ci);
                max.at<double>(0, ci) = std::max( max.at<double>(0, ci), (double)src.at<Type>(ri, ci) );
                min.at<double>(0, ci) = std::min( min.at<double>(0, ci), (double)src.at<Type>(ri, ci) );
            }
        }
    }
    else
    {
        for( int ci = 0; ci < src.cols; ci++ )
        {
            for( int ri = 0; ri < src.rows; ri++ )
            {
                sum.at<double>(ri, 0) += src.at<Type>(ri, ci);
                max.at<double>(ri, 0) = std::max( max.at<double>(ri, 0), (double)src.at<Type>(ri, ci) );
                min.at<double>(ri, 0) = std::min( min.at<double>(ri, 0), (double)src.at<Type>(ri, ci) );
            }
        }
    }
    sum.convertTo( avg, CV_64FC1 );
    avg = avg * (1.0 / (dim==0 ? (double)src.rows : (double)src.cols));
}
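A quick cross-check against OpenCV's built-in reduce, under the same convention that dim == 0 collapses the rows into one row of per-column statistics:

Mat m(4, 5, CV_32FC1), sum, avg, maxm, minm, ref;
randu(m, 0, 10);
testReduce<float>(m, sum, avg, maxm, minm, 0);
reduce(m, ref, 0, CV_REDUCE_SUM, CV_64FC1);  // ref should match sum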
Example #24
//------------------------------------------------------------------------------
const Mat ImageNode::getFlowField(const Mat& u, const Mat& v) {
  Mat flowField;
  float maxDisplacement = 0.0f;

  for (int i = 0; i < u.rows; ++i) {
    const float* ptr_u = u.ptr<float>(i);
    const float* ptr_v = v.ptr<float>(i);

    for (int j = 0; j < u.cols; ++j) {
      float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));

      if (d > maxDisplacement)
        maxDisplacement = d;
    }
  }

  flowField.create(u.size(), CV_8UC4);

  for (int i = 0; i < flowField.rows; ++i) {
    const float* ptr_u = u.ptr<float>(i);
    const float* ptr_v = v.ptr<float>(i);

    Vec4b* row = flowField.ptr<Vec4b>(i);

    for (int j = 0; j < flowField.cols; ++j) {
      row[j][0] = 0;
      row[j][1] = static_cast<unsigned char>(mapValue(-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
      row[j][2] = static_cast<unsigned char>(mapValue(ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
      row[j][3] = 255;
    }
  }
  //  GpuMat flowGpu = GpuMat(flowField);
  //  double *min, *max;
  //  minMax(flowGpu, min, max);
  //  cout<<"Min: "<<min<<" Max: "<<max<<endl;
  addQuiver(flowField, u, v);
  return flowField;
}
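mapValue is not defined in this excerpt; a plausible sketch matching its use above (linear remap from [min0, max0] to [min1, max1], clamped):

// Assumed helper: clamp v to [min0, max0], then remap linearly to [min1, max1]
inline float mapValue(float v, float min0, float max0, float min1, float max1)
{
  v = std::max(min0, std::min(v, max0));
  return min1 + (v - min0) * (max1 - min1) / (max0 - min0);
}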
Example #25
void TemplateTracker::track(Mat &depthImage, Rect segRegion) {

	/*if(deathFlagShouldRemove) {

		return;
	}*/

	Mat img = depthImage;

	int result_cols =  img.cols - templ.cols + 1;
	int result_rows = img.rows - templ.rows + 1;

	Mat result;
	result.create( result_rows, result_cols, CV_32FC1 ); // Mat::create takes (rows, cols, type)

	matchTemplate(depthImage, templ, result, CV_TM_CCORR_NORMED);

	normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
	

	double minVal; double maxVal; Point minLoc; Point maxLoc;
	Point matchLoc;

	minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

	matchLoc = maxLoc;
	
	trackedRegion = Rect(matchLoc.x, matchLoc.y, templ.cols, templ.rows);

	// All the if/elses based on body type make me cry
	if(bodyType == HEAD) {
		reconciledHeads(segRegion, trackedRegion, depthImage);
	}
	else if(bodyType == CHEST) {
		
	}
	
}
void PatchGenerator::warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
                                    Mat& warped, int border, RNG& rng) const
{
    Mat_<double> T = matT;
    Rect roi(INT_MAX, INT_MAX, INT_MIN, INT_MIN);

    for( int k = 0; k < 4; k++ )
    {
        Point2f pt0, pt1;
        pt0.x = (float)(k == 0 || k == 3 ? 0 : image.cols);
        pt0.y = (float)(k < 2 ? 0 : image.rows);
        pt1.x = (float)(T(0,0)*pt0.x + T(0,1)*pt0.y + T(0,2));
        pt1.y = (float)(T(1,0)*pt0.x + T(1,1)*pt0.y + T(1,2));

        roi.x = std::min(roi.x, cvFloor(pt1.x));
        roi.y = std::min(roi.y, cvFloor(pt1.y));
        roi.width = std::max(roi.width, cvCeil(pt1.x));
        roi.height = std::max(roi.height, cvCeil(pt1.y));
    }

    roi.width -= roi.x - 1;
    roi.height -= roi.y - 1;
    int dx = border - roi.x;
    int dy = border - roi.y;

    if( (roi.width+border*2)*(roi.height+border*2) > buf.cols )
        buf.create(1, (roi.width+border*2)*(roi.height+border*2), image.type());

    warped = Mat(roi.height + border*2, roi.width + border*2,
                 image.type(), buf.data);

    T(0,2) += dx;
    T(1,2) += dy;
    (*this)(image, T, warped, warped.size(), rng);

    if( T.data != matT.data )
        T.convertTo(matT, matT.type());
}
Example #27
/**
 * @function MatchingMethod
 * @brief Trackbar callback
 */
void MatchingMethod( int, void* )
{
    /// Source image to display
    Mat img_display;
    img.copyTo( img_display );
    
    /// Create the result matrix
    int result_cols =  img.cols - templ.cols + 1;
    int result_rows = img.rows - templ.rows + 1;
    
    result.create( result_rows, result_cols, CV_32FC1 );
    
    /// Do the Matching and Normalize
    matchTemplate( img, templ, result, match_method );
    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
    
    /// Localizing the best match with minMaxLoc
    double minVal; double maxVal; Point minLoc; Point maxLoc;
    Point matchLoc;
    
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
    
    
    /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
    if( match_method  == TM_SQDIFF || match_method == TM_SQDIFF_NORMED )
    { matchLoc = minLoc; }
    else
    { matchLoc = maxLoc; }
    
    /// Show me what you got
    rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
    rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
    resize(img_display, img_display, Size(640,360));
    resize(result, result, Size(640,360));
    imshow( image_window, img_display );
    
    return;
}
int main (int argc, char** argv) {
  /// Read the image
  std::string pattern("pattern.png");
  /// Load source image and convert it to gray
  if (argc < 2) {
    std::cout << " Usage : ./backprojection.out image_name" << std::endl;
    std::cout << " Loading default image ! " << std::endl;
  }
  else
    pattern = std::string(argv[1]);

  src = cv::imread( pattern , 1 );

  if (!src.data) {
    std::cout << "Failed to read the image!" << std::endl;
    return -1;
  }

  /// Transform it to HSV
  cvtColor(src, hsv, CV_BGR2HSV);

  /// Use only the Hue value
  hue.create(hsv.size(), hsv.depth());
  int ch[] = {0, 0};
  mixChannels(&hsv, 1, &hue, 1, ch, 1);

  /// Create Trackbar to enter the number of bins
  namedWindow("Source image");
  createTrackbar("* Hue  bins: ", "Source image", &bins, 180, Hist_and_Backproj);
  Hist_and_Backproj(0, 0);

  /// Show the image
  imshow("Source image", src);

  /// Wait until user exits the program
  waitKey(0);
  return 0;
}
Example #29
bool Match::templateMatch(Mat *img, Mat *templ, int match_method)
{
	/// Source image to display
	Mat img_display;
	img->copyTo( img_display );
	Mat result;

	/// Create the result matrix
	int result_cols =  img->cols - templ->cols + 1;
	int result_rows = img->rows - templ->rows + 1;

	result.create( result_rows, result_cols, CV_32FC1 ); // Mat::create takes (rows, cols, type)

	/// Do the Matching and Normalize
	matchTemplate( *img, *templ, result, match_method );
	normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );

	/// Localizing the best match with minMaxLoc
	double minVal; double maxVal; Point minLoc; Point maxLoc;
	Point matchLoc;

	minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

	/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
	if( match_method  == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
	{ matchLoc = minLoc; }
	else
	{ matchLoc = maxLoc; }

	/// Show me what you got
	rectangle( img_display, matchLoc, Point( matchLoc.x + templ->cols , matchLoc.y + templ->rows ), Scalar::all(0), 2, 8, 0 );
	rectangle( result, matchLoc, Point( matchLoc.x + templ->cols , matchLoc.y + templ->rows ), Scalar::all(0), 2, 8, 0 );

	imshow("Image", img_display);
	imshow("Result", result);
	waitKey(0);
	return true;
}
Example #30
int main(int argc, char *argv[])
{

    if(argc<2) {
        cerr << "Usage : ColorConstancy filename " << endl;
        return -1;
    }

    Mat image=imread(argv[1]);

    cerr << "reading image " << argv[1] << endl;

    color_correction::contrast_stretching a;
    color_correction::gray_world b1;
    color_correction::gray_edge b2;
    color_correction::maxRGB b3;
    color_correction::max_edge b4;

    Mat input;
    input.create(240,320,CV_8UC(3));
    resize(image,input, input.size(), 0, 0, INTER_NEAREST);



    imshow("original",input);
    imshow("contrast stretching",a.run(input));
    imshow("gray world RGB",b1.run2(input,1,2));
    imshow("gray world Lab",b1.run1(input,1));
    imshow("Shades of gray",b1.run2(input,6,2));
    imshow("maxRGB",b3.run(input,6,0));
    imshow("gray edge",b2.run(input,1,0));
    imshow("max edge",b4.run(input,1,0));



    cv::waitKey(0);

    return 0;
}