Example No. 1
void opticalFlow::occMatpEst( Mat_<Vec2f> &flow12, Mat_<Vec2f> &flow21, Mat_<uchar>&occMap)
{
	int iy, ix;

	const float FLOW_PIXEL_THRESH = 2;

	occMap.setTo(255);
	for (iy=0; iy<height1; ++iy)
	{
		for (ix=0; ix<width1; ++ix)
		{
			Vec2f fFlow = flow12[iy][ix];
			int ny, nx;
			ny = floor(iy+fFlow[1]+0.5);
			nx = floor(ix+fFlow[0]+0.5);

			if (ny>=0 && ny<height1 && nx>=0 && nx<width1) 
			{
				cv::Vec2f bFlow = flow21[ny][nx];
				if (fabs(bFlow[1]+ny-iy)<FLOW_PIXEL_THRESH && fabs(bFlow[0]+nx-ix)<FLOW_PIXEL_THRESH)
				{
					continue;
				}
			}
			occMap[iy][ix] = 0;
		}
	}

	// Remove small connected components (fewer than 50 pixels) from the occlusion map
	Mat bw = occMap;
	Mat labelImage(occMap.size(), CV_32S);
	int nLabels = connectedComponents(bw, labelImage, 8);

	vector<int> hist(nLabels, 0);
	for (iy=0; iy<height1; ++iy)
		for (ix=0; ix<width1; ++ix)
			hist[labelImage.at<int>(iy,ix)]++;
	vector<int> rmv_list;
	rmv_list.reserve(20);
	for (int i=0;i<nLabels;++i){
		if (hist[i]<50)
			rmv_list.push_back(i);
	}
	for (iy=0; iy<height1; ++iy)
	{
		for (ix=0; ix<width1; ++ix)
		{
			for (size_t r = 0; r < rmv_list.size(); ++r) {
				if (labelImage.at<int>(iy,ix) == rmv_list[r])
					occMap[iy][ix] = 0;
			}
		}
	}
}
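A minimal driver sketch for the consistency check above, assuming the usual OpenCV headers; the frame paths, the Farneback parameters and the helper name runOcclusionCheck are illustrative, and `of` stands for an already constructed opticalFlow instance whose height1/width1 match the frames (the class setup is not part of this example).

// Hypothetical driver: estimate forward and backward flow, then run the consistency check.
void runOcclusionCheck(opticalFlow &of)
{
    Mat f1 = imread("frame1.png", IMREAD_GRAYSCALE);
    Mat f2 = imread("frame2.png", IMREAD_GRAYSCALE);
    Mat_<Vec2f> flow12, flow21;
    calcOpticalFlowFarneback(f1, f2, flow12, 0.5, 3, 15, 3, 5, 1.2, 0); // forward flow
    calcOpticalFlowFarneback(f2, f1, flow21, 0.5, 3, 15, 3, 5, 1.2, 0); // backward flow
    Mat_<uchar> occMap(f1.rows, f1.cols);      // occMatpEst expects a preallocated map
    of.occMatpEst(flow12, flow21, occMap);     // 255 = consistent flow, 0 = occluded or tiny blob
}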
Example No. 2
/* Assigns Ix, Iy, It gradient values computed from imgA and imgB. */
void compute_derivatives(const Mat_<float>& imgA, const Mat_<float>& imgB, 
			 Mat_<float>& Ix, Mat_<float>& Iy, Mat_<float>& It) {
  
	int channels = 1; //assume grayscale images

	int nRows = imgA.rows; //assume same size of imgA and imgB
	int nCols = imgA.cols * channels;

	Ix = Mat::zeros(imgA.size(), CV_32F);
	Iy = Mat::zeros(imgA.size(), CV_32F);
	It = Mat::zeros(imgA.size(), CV_32F);

	float *p_a1, *p_a2, *p_b1, *p_b2;
	float *p_ix, *p_iy, *p_it;
	
	p_a1 = (float*)imgA.ptr<float>(0); //cast is needed (const float* to float*)
	p_b1 = (float*)imgB.ptr<float>(0);
	p_a2 = p_a1 + nCols;
	p_b2 = p_b1 + nCols;
	p_ix = Ix.ptr<float>(0);
	p_iy = Iy.ptr<float>(0);
	p_it = It.ptr<float>(0);
	for ( int j = 0; j < nCols*(nRows-1); ++j)
	{       
		//calculate spatiotemporal averages as in Horn & Schunck (note: at the right-hand x-boundary, j+1 wraps to the next row)
		p_ix[j] = 0.25 * (p_a1[j+1] - p_a1[j]+p_a2[j+1]-p_a2[j]
				+p_b1[j+1]-p_b1[j]+p_b2[j+1]-p_b2[j]);
		p_iy[j] = 0.25 * (p_a2[j]-p_a1[j]+p_a2[j+1]-p_a1[j+1]
				+p_b2[j]-p_b1[j]+p_b2[j+1]-p_b1[j+1]);
		p_it[j] = 0.25 * (p_b1[j] - p_a1[j]+p_b1[j+1]-p_a1[j+1]
				+p_b2[j]-p_a2[j]+p_b2[j+1]-p_a2[j+1]);
		
	}
}
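A short usage sketch for the routine above; the file names, the 1/255 scaling and the helper name demoDerivatives are only illustrative, and the usual OpenCV headers are assumed.

void demoDerivatives()
{
    Mat_<float> imgA, imgB, Ix, Iy, It;
    imread("frameA.png", IMREAD_GRAYSCALE).convertTo(imgA, CV_32F, 1.0 / 255.0);
    imread("frameB.png", IMREAD_GRAYSCALE).convertTo(imgB, CV_32F, 1.0 / 255.0);
    compute_derivatives(imgA, imgB, Ix, Iy, It); // Ix, Iy, It now hold the averaged gradients
}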
Example No. 3
LaplacianBlending::LaplacianBlending(const Mat_<Vec3f>& _left, const Mat_<Vec3f>& _right, int _levels)://constructor, used e.g. as LaplacianBlending lb(l,r,4);
left(_left),right(_right),levels(_levels)  
{  
	assert(_left.size() == _right.size());  

	buildPyramids();  //construct Laplacian Pyramid and Gaussian Pyramid  
	blendLapPyrs();   //blend left & right Pyramids into one Pyramid  
};  
Example No. 4
LaplacianBlending::LaplacianBlending(const Mat_<Vec3f>& _left, const Mat_<Vec3f>& _right, const Mat_<float>& _leftMask, const Mat_<float>& _rightMask, int _levels):
left(_left),right(_right),rightMask(_rightMask),leftMask(_leftMask),levels(_levels)
{
    assert(_left.size() == _right.size());
    assert(_left.size() == _leftMask.size());
    assert(_rightMask.size() == _leftMask.size());
    buildPyramids();
    blendLapPyrs();
};
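A hedged construction sketch for the mask-aware constructor above; only the setup is shown, because retrieving the blended result depends on the rest of the class, which this listing does not include. The half-and-half mask, the image paths and the helper name demoBlendSetup are illustrative.

void demoBlendSetup()
{
    Mat_<Vec3f> l, r;
    imread("left.jpg").convertTo(l, CV_32FC3, 1.0 / 255.0);
    imread("right.jpg").convertTo(r, CV_32FC3, 1.0 / 255.0);
    Mat_<float> lMask(l.rows, l.cols, 0.0f);
    lMask(Rect(0, 0, lMask.cols / 2, lMask.rows)).setTo(1.0f); // take the left half from l
    Mat_<float> rMask = 1.0 - lMask;                           // complementary weight for r
    LaplacianBlending lb(l, r, lMask, rMask, 4);               // 4 pyramid levels
}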
Example No. 5
SLAM::LineFeature::LineFeature(const Mat_<imtype> & im, const CameraState & state,
			       const Point2f & pt2d, int rx, int ry)
  :descriptor(im(Range(max(0, iround(pt2d.y-ry)),
		       min(im.size().height, iround(pt2d.y+ry+1))),
		 Range(max(0, iround(pt2d.x-rx)),
		       min(im.size().width, iround(pt2d.x+rx+1)))).clone()),
   cone(state.getLocalCoordinatesPoint(pt2d), state.t, 3.f, state.f, 5,
	100, 20, 3) /*TODO: cone parameters*/,
   timeSinceLastSeen(1) {
}
Example No. 6
void regularize(Mat_<uchar> &gray,
				BBox &bbox, Pose &P,
				Mat_<double> &shape,
				Mat_<uchar> &lEye,
				Mat_<uchar> &rEye)
{
	double w = bbox.w;
	double h = bbox.h;
	double scale = 1.5;
	double minx = max(0.0, bbox.x - (scale - 1)*w / 2);
	double miny = max(0.0, bbox.y - (scale - 1)*h / 2);
	double maxx = min(gray.cols - 1.0, minx + scale*w);
	double maxy = min(gray.rows - 1.0, miny + scale*h);
	w = maxx - minx;
	h = maxy - miny;

	BBox oldbox(minx, miny, w, h);
	BBox newbox(0, 0, w, h);

	Mat_<uchar> faceImg = gray(Rect(minx, miny, w, h));
	Mat_<uchar> regFace = Mat_<uchar>::zeros(faceImg.size());
	Mat rotMat = getRotationMatrix2D(Point2f(w / 2.0, h / 2.0), P.roll, 1.0);
	warpAffine(faceImg, regFace, rotMat, faceImg.size());

	double cosa = rotMat.at<double>(0, 0);
	double sina = rotMat.at<double>(0, 1);
	Mat_<double> projShape = projectShape(shape, oldbox);
	Mat_<double> newShape = Mat_<double>::zeros(shape.size());
	for (int i = 0; i < shape.rows; i++) {
		newShape(i, 0) = cosa*projShape(i, 0) + sina*projShape(i, 1);
		newShape(i, 1) = -sina*projShape(i, 0) + cosa*projShape(i, 1);
	}
	newShape = reprojectShape(newShape, newbox);

	// get eye images & data
	Rect lRect = findEyeRect(regFace, newShape, LEFT_EYE);
	Rect rRect = findEyeRect(regFace, newShape, RIGHT_EYE);
	vector<Point> lCont = findEyeCont(lRect, newShape, LEFT_EYE);
	vector<Point> rCont = findEyeCont(rRect, newShape, RIGHT_EYE);
	lEye = regFace(lRect);
	rEye = regFace(rRect);
	// set mask and crop
	Mat_<uchar> lMask = findEyeMask(lEye, lCont);
	Mat_<uchar> rMask = findEyeMask(rEye, rCont);
	//lEye.setTo(0, ~lMask);
	//rEye.setTo(0, ~rMask);
	normalize(lEye, lEye, 255, 0, NORM_MINMAX);
	normalize(rEye, rEye, 255, 0, NORM_MINMAX);
	// resize to proper size
	resize(lEye, lEye, Size(64, 32));
	resize(rEye, rEye, Size(64, 32));
	
}
Example No. 7
Mat_< float > Saliency::assignFilter( const Mat_< Vec3b >& im, const Mat_< int >& seg, const vector< SuperpixelStatistic >& stat, const std::vector< float >& sal ) const {
	std::vector< float > source_features( seg.size().area()*5 ), target_features( im.size().area()*5 );
	Mat_< Vec2f > data( seg.size() );
	// There is a typo in the paper: alpha and beta are actually squared, or directly applied to the values
	const float a = settings_.alpha_, b = settings_.beta_;
	
	const int D = 5;
	// Create the source features
	for( int j=0,k=0; j<seg.rows; j++ )
		for( int i=0; i<seg.cols; i++, k++ ) {
			int id = seg(j,i);
			data(j,i) = Vec2f( sal[id], 1 );
			
			source_features[D*k+0] = a * i;
			source_features[D*k+1] = a * j;
			if (D == 5) {
				source_features[D*k+2] = b * stat[id].mean_rgb_[0];
				source_features[D*k+3] = b * stat[id].mean_rgb_[1];
				source_features[D*k+4] = b * stat[id].mean_rgb_[2];
			}
		}
	// Create the target features
	for( int j=0,k=0; j<im.rows; j++ )
		for( int i=0; i<im.cols; i++, k++ ) {
			target_features[D*k+0] = a * i;
			target_features[D*k+1] = a * j;
			if (D == 5) {
				target_features[D*k+2] = b * im(j,i)[0];
				target_features[D*k+3] = b * im(j,i)[1];
				target_features[D*k+4] = b * im(j,i)[2];
			}
		}
	
	// Do the filtering [filtering using the target features twice works slightly better than the method described in our paper]
	if (settings_.use_spix_color_) {
		Filter filter( source_features.data(), seg.cols*seg.rows, target_features.data(), im.cols*im.rows, D );
		filter.filter( data.ptr<float>(), data.ptr<float>(), 2 );
	}
	else {
		Filter filter( target_features.data(), im.cols*im.rows, D );
		filter.filter( data.ptr<float>(), data.ptr<float>(), 2 );
	}
	
	Mat_<float> r( im.size() );
	for( int j=0; j<im.rows; j++ )
		for( int i=0; i<im.cols; i++ )
			r(j,i) = data(j,i)[0] / (data(j,i)[1] + 1e-10);
	return r;
}
Example No. 8
File: main.cpp Project: KAlO2/Pea
Mat_<uchar> transformModifiedLUX(Mat_<Vec3b> image_BGR)
{
    Mat_<uchar> Ucap(image_BGR.size());

    int B = 0, G = 0, R = 0, u_cap_int = 0;
    double u_cap = 0.0;
    for(int i = 0; i < image_BGR.rows; ++i)
    {
        for(int j = 0; j < image_BGR.cols; ++j)
        {
            B = image_BGR(i, j)[0];
            G = image_BGR(i, j)[1];
            R = image_BGR(i, j)[2];

            if(R > G)
            {
                u_cap = (256.0 * G) / R;
                u_cap_int = round(u_cap);
                Ucap.at<uchar>(i, j) = u_cap_int;
            }
            else
                Ucap.at<uchar>(i, j) = 255;
        }
    }
    return Ucap;
}
Example No. 9
File: main.cpp Project: KAlO2/Pea
Mat_<uchar> transformLUX(Mat_<Vec3b> image_BGR)
{
    Mat_<uchar> U(image_BGR.size());

    int B = 0, G = 0, R = 0, L_int = 0, u_int = 0;
    double L = 0.0, u = 0.0;
    for(int i = 0; i < image_BGR.rows; ++i)
    {
        for(int j = 0; j < image_BGR.cols; ++j)
        {
            B = image_BGR(i, j)[0];
            G = image_BGR(i, j)[1];
            R = image_BGR(i, j)[2];

            L = (pow(R+1, 0.3) * pow(G+1, 0.6) * pow(B+1, 0.1)) - 1;
            L_int = round(L);

            if(R > L_int)
            {
                u = (256.0 * (L_int + 1)) / (R + 1);
                u_int = round(u);
                U.at<uchar>(i, j) = u_int;
            }
            else
                U.at<uchar>(i, j) = 255;
        }
    }
    return U;
}
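A usage sketch for the two transforms above; the input and output paths and the helper name demoLux are illustrative, and the usual OpenCV headers are assumed.

void demoLux()
{
    Mat_<Vec3b> bgr = imread("input.jpg", IMREAD_COLOR);
    Mat_<uchar> U    = transformLUX(bgr);          // LUX U channel, 255 wherever R <= L
    Mat_<uchar> Ucap = transformModifiedLUX(bgr);  // modified variant, 255 wherever R <= G
    imwrite("lux_u.png", U);
    imwrite("lux_ucap.png", Ucap);
}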
Example No. 10
void DetectionTool::getMove(const Mat_<Point2f>& flow, Mat& dst, float minmotion, int color)
{
    dst.create(flow.size(), CV_8UC1);
    dst.setTo(Scalar::all(0));

    //Avoids computing the square root for every motion-vector length.
    //If minmotion <= 0, we want to select all motions.
    if(minmotion > 0)
        minmotion *= minmotion;

    //Scan the motion matrix
    for (int y = 0; y < flow.rows; ++y)
    {
        for (int x = 0; x < flow.cols; ++x)
        {
            Point2f u = flow(y, x);
            if (isFlowCorrect(u))
            {
                //the comparison is done without a square root because minmotion was squared above
                if(u.x * u.x + u.y * u.y > minmotion)
                {
                    dst.at<uchar>(y, x) = color;
                }
            }
        }
    }
}
Example No. 11
void add_feature(const Mat &image, const char *name)
{
    assert (prediction.size() == image.size());
    int found = 0;
    string _name = string(name);
    cout << "Got feature " << name << endl;
    for (int wi = 0; wi < weak_learners.size(); wi++) {
        if (weak_learners[wi].feature_name == _name) {
            found = 1;
            float *score_ptr = prediction.ptr<float>(0);
            const float *feature_ptr = image.ptr<float>(0);
            float thresh = weak_learners[wi].threshold;
            float left_val = weak_learners[wi].left_val;
            float right_val = weak_learners[wi].right_val;
            for (int i = 0; i < prediction.total(); i++, feature_ptr++, score_ptr++)
                    *score_ptr += ((*feature_ptr <= thresh) ? left_val : right_val);
        }
    }
    if (! found)
        cout << "Didn't find any uses of feature " << name << endl;

    
    // remove old weak learners from consideration
    weak_learners.erase(remove(weak_learners.begin(), weak_learners.end(), name), weak_learners.end());

    if (should_save(name))
        write_feature(h5f, image, name);
}
Example No. 12
void iterative_computation(Mat_<float>& u, Mat_<float>& v, const Mat_<float>& Ix, 
			   const Mat_<float>& Iy, const Mat_<float>& It) {
	if (METHOD == 0) { // Jacobi method
	    Mat_<float> f =  (Mat_<float>(3,3) << 1.0/12.0, 1.0/6.0, 1.0/12.0, 1.0/6.0, 0.0, 1.0/6.0, 
					  1.0/12.0, 1.0/6.0, 1.0/12.0);
	    Mat_<float> avg_u, avg_v;
	    filter2D(u, avg_u, -1 , f, Point(-1, -1), 0, BORDER_DEFAULT );
	    filter2D(v, avg_v, -1 , f, Point(-1, -1), 0, BORDER_DEFAULT );
	    Mat_<float> d1 = Ix.mul(avg_u) + Iy.mul(avg_v) + It;
	    Mat_<float> d2 = Mat::ones(u.size(), CV_32F) * ALPHA * ALPHA + Ix.mul(Ix) + Iy.mul(Iy);
	    Mat_<float> r = d1.mul(1 / d2);
	    u = avg_u - Ix.mul(r);
	    v = avg_v - Iy.mul(r);
	}
	else if (METHOD == 1) { // Gauss-Seidel method 
	    for (int i = 1; i < u.rows - 1; i ++) {
		for (int j = 1; j < u.cols - 1; j ++) {
			float avg_u = 1.f / 6.f * (u.at<float>(i - 1, j) + u.at<float>(i, j + 1) + 
					       u.at<float>(i + 1, j) + u.at<float>(i, j - 1)) +
				      1.f / 12.f * (u.at<float>(i - 1, j -1) + u.at<float>(i - 1, j + 1) + 
						u.at<float>(i + 1, j - 1) + u.at<float>(i + 1, j + 1));
			float avg_v = 1.f / 6.f * (v.at<float>(i - 1, j) + v.at<float>(i, j + 1) + 
					       v.at<float>(i + 1, j) + v.at<float>(i, j - 1)) +
				      1.f / 12.f * (v.at<float>(i - 1, j -1) + v.at<float>(i - 1, j + 1) + 
						v.at<float>(i + 1, j - 1) + v.at<float>(i + 1, j + 1));
			float ix = Ix.at<float>(i, j);
			float iy = Iy.at<float>(i, j);
			float it = It.at<float>(i, j);
			float r = (ix * avg_u + iy * avg_v + it) / 
				  (ALPHA * ALPHA + ix * ix + iy * iy);
			
			u.at<float>(i, j) = avg_u - ix * r;
			v.at<float>(i, j) = avg_v - iy * r;
		}
	    }
	} 

	else if (METHOD == 2) { // Successive overrelaxation method 
	    for (int i = 1; i < u.rows - 1; i ++) {
		for (int j = 1; j < u.cols - 1; j ++) {
			float avg_u = 1.f / 6.f * (u.at<float>(i - 1, j) + u.at<float>(i, j + 1) + 
					       u.at<float>(i + 1, j) + u.at<float>(i, j - 1)) +
				      1.f / 12.f * (u.at<float>(i - 1, j -1) + u.at<float>(i - 1, j + 1) + 
						u.at<float>(i + 1, j - 1) + u.at<float>(i + 1, j + 1));
			float avg_v = 1.f / 6.f * (v.at<float>(i - 1, j) + v.at<float>(i, j + 1) + 
					       v.at<float>(i + 1, j) + v.at<float>(i, j - 1)) +
				      1.f / 12.f * (v.at<float>(i - 1, j -1) + v.at<float>(i - 1, j + 1) + 
						v.at<float>(i + 1, j - 1) + v.at<float>(i + 1, j + 1));
			float ix = Ix.at<float>(i, j);
			float iy = Iy.at<float>(i, j);
			float it = It.at<float>(i, j);
			float r = (ix * avg_u + iy * avg_v + it) / 
				  (ALPHA * ALPHA + ix * ix + iy * iy);
			
			u.at<float>(i, j) = (1 - WEIGHT) * u.at<float>(i, j) + WEIGHT * (avg_u - ix * r);
			v.at<float>(i, j) = (1 - WEIGHT) * v.at<float>(i, j) + WEIGHT * (avg_v - iy * r);
		}
	    }
	}			   
}  
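A minimal driver sketch for the update above, assuming METHOD, ALPHA (and WEIGHT for the SOR branch) are defined as in this file, that compute_derivatives is the routine from Example No. 2, and that imgA/imgB are same-size float frames; the iteration count and the helper name demoIterate are illustrative.

void demoIterate(const Mat_<float>& imgA, const Mat_<float>& imgB)
{
    Mat_<float> Ix, Iy, It;
    compute_derivatives(imgA, imgB, Ix, Iy, It);
    Mat_<float> u = Mat_<float>::zeros(imgA.size());
    Mat_<float> v = Mat_<float>::zeros(imgA.size());
    for (int it = 0; it < 100; ++it)   // fixed iteration count; a residual test could be used instead
        iterative_computation(u, v, Ix, Iy, It);
    // u and v now hold the estimated flow components
}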
Example No. 13
void fft2(Mat_<float> src)
{
	int x = getOptimalDFTSize(2* src.rows );
	int y = getOptimalDFTSize(2* src.cols );
	copyMakeBorder(src, src, 0, (x - src.rows), 0, (y - src.cols), BORDER_CONSTANT, Scalar::all(0));
	// Get padded image size
	const int wx = src.cols, wy = src.rows;
	const int cx = wx/2, cy = wy/2;

	//--------------------------------//
	// DFT - performing		     //
	cv::Mat_<float> imgs[] = {src.clone(), Mat::zeros(src.size(), CV_32F)};
	cv::Mat_<cv::Vec2f> img_dft;
	merge(imgs,2,img_dft);
	dft(img_dft, img_dft);
	split(img_dft,imgs);
	cv::Mat_<float> magnitude, phase;
	cartToPolar(imgs[0],imgs[1],magnitude,phase);
	dftshift(magnitude);
	magnitude = magnitude + 1.0f;
	log(magnitude,magnitude);
	normalize(magnitude,magnitude,0,1,CV_MINMAX);
	namedWindow("img_dft",WINDOW_NORMAL);
	imshow("img_dft",magnitude);
	waitKey(0);
	cout << "out" << endl;
}
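A usage sketch for fft2; the routine pads the image, transforms it and displays the centered log-magnitude spectrum (dftshift is assumed to be defined elsewhere in the same file). The input path and the helper name demoFft2 are illustrative.

void demoFft2()
{
    Mat_<float> img;
    imread("input.png", IMREAD_GRAYSCALE).convertTo(img, CV_32F, 1.0 / 255.0);
    fft2(img);  // shows the spectrum in the "img_dft" window and blocks on waitKey(0)
}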
Example No. 14
int copyMakeBorder(/*const*/ Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar())
{
	FBC_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);

	if (src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0) {
		Size wholeSize;
		Point ofs;
		src.locateROI(wholeSize, ofs);
		int dtop = std::min(ofs.y, top);
		int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
		int dleft = std::min(ofs.x, left);
		int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
		src.adjustROI(dtop, dbottom, dleft, dright);
		top -= dtop;
		left -= dleft;
		bottom -= dbottom;
		right -= dright;
	}

	if (dst.empty() || dst.rows != (src.rows + top + bottom) || dst.cols != (src.cols + left + right)) {
		dst.release();
		dst = Mat_<_Tp, chs>(src.rows + top + bottom, src.cols + left + right);
	}

	if (top == 0 && left == 0 && bottom == 0 && right == 0) {
		if (src.data != dst.data || src.step != dst.step)
			src.copyTo(dst);
		return 0;
	}

	borderType &= ~BORDER_ISOLATED;

	if (borderType != BORDER_CONSTANT) {
		copyMakeBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, src.elemSize(), borderType);
	} else {
		int cn = src.channels, cn1 = cn;
		AutoBuffer<double> buf(cn);

		scalarToRawData<_Tp, chs>(value, buf, cn);
		copyMakeConstBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), (uchar*)(double*)buf);
	}

	return 0;
}
Example No. 15
Mat_< Vec3f > Superpixel::visualizeRandom( const Mat_< int >& segmentation ) const {
	srand( segmentation.size().area() );
	
	int n_label = nLabels( segmentation );
	
	std::vector< Vec3f > colors;
	for( int k=0; k<n_label; k++ )
		colors.push_back( Vec3f( 1.0*rand()/RAND_MAX, 1.0*rand()/RAND_MAX, 1.0*rand()/RAND_MAX ) );
	
	return assign( colors, segmentation );
}
Example No. 16
Mat_< float > Saliency::saliency( const Mat_< Vec3b >& im ) const {
	// Convert the image to the lab space
	Mat_<Vec3f> rgbim, labim;
	im.convertTo( rgbim, CV_32F, 1.0/255. );
	cvtColor( rgbim, labim, CV_BGR2Lab );
	
	// Superpixel superpixel_( 300, 50.0 );
	// Do the abstraction
	Mat_<int> segmentation = superpixel_.segment( labim );
	std::vector< SuperpixelStatistic > stat = superpixel_.stat( labim, im, segmentation );
	
	// Compute the uniqueness
	std::vector<float> unique( stat.size(), 1 );
	if (settings_.uniqueness_) {
		if (settings_.filter_uniqueness_)
			unique = uniquenessFilter( stat );
		else
			unique = uniqueness( stat );
	}
	
	// Compute the distribution
	std::vector<float> dist( stat.size(), 0 );
	if (settings_.distribution_) {
		if (settings_.filter_distribution_)
			dist = distributionFilter( stat );
		else
			dist = distribution( stat );
	}
	
	// Combine the two measures
	std::vector<float> sp_saliency( stat.size() );
	for( int i=0; i<stat.size(); i++ )
		sp_saliency[i] = unique[i] * exp( - settings_.k_ * dist[i] );
	
	// Upsampling
	Mat_<float> r;
	if (settings_.upsample_)
		r = assignFilter( im, segmentation, stat, sp_saliency );
	else
		r = assign( segmentation, sp_saliency );
	
	// Rescale the saliency to [0..1]
	double mn, mx;
	minMaxLoc( r, &mn, & mx );
	r = (r - mn) / (mx - mn);
	
	// Increase the saliency value until we are below the minimal threshold
	double m_sal = settings_.min_saliency_ * r.size().area();
	for( float sm = sum( r )[0]; sm < m_sal; sm = sum( r )[0] )
		r =  min( r*m_sal/sm, 1.0f );
	
	return r;
}
Example No. 17
File: main.cpp Project: KAlO2/Pea
// Extract the pseudo-hue plane
Mat_<uchar> transformPseudoHue(Mat_<Vec3b> image)
{
    Mat_<double> pseudo_hue(image.size());
    Mat_<uchar> pseudo_hue_norm(image.size());

    int B = 0, G = 0, R = 0;
    for(int i = 0; i < image.rows; ++i)
    {
        for(int j = 0; j < image.cols; ++j)
        {
            B = image(i, j)[0];
            G = image(i, j)[1];
            R = image(i, j)[2];

            if(R == 0)
                pseudo_hue.at<double>(i, j) = 0.0;
            else
                pseudo_hue.at<double>(i, j) = (double)R / (R + G);
        }
    }
    std::pair<double, double> statistics = Stats(pseudo_hue);
    double Hmin = statistics.first;
    double Hmax = statistics.second;
    double Hrange = (Hmax - Hmin);

    for(int i = 0; i < image.rows; ++i)
    {
        for(int j = 0; j < image.cols; ++j)
        {
            double  temp = (pseudo_hue.at<double>(i, j) - Hmin) / Hrange;
            pseudo_hue_norm.at<uchar>(i, j) = round(temp * 255);
        }
    }

    return pseudo_hue_norm;
}
Example No. 18
/*
	Given a sensor grid to draw on and a map co-ordinate locating the hit,

	draws the patch centered at the proper grid index
	if ADDITIVE is #defined, the gaussian patch is added to the occupancy grid
	if ADDITIVE isn't #defined, the occupancy grid is set to the value in the patch unless it already has a higher value
*/
void drawHit(Mat_<uchar>& grid, Point2f hit) {
    static int radius = static_cast<int>(fattening / gridRes * (1 << fixedPoints));
    static Mat_<uchar> patch = patchInit();


    Point center = pointToGridPoint(hit);	//roi doesn't support subpixel precision
    Rect roi_ = Rect(center.x-radius/2.0,center.y-radius/2.0, radius,radius);	//rect bounding patch and centered on hit
    Rect roi = roi_ & gridMatBounds;	//intersection of roi and map boundary
    if(roi.size() != roi_.size())	return;	//do not draw patches if they don't entirely fit on the map because it is inconvenient to do so

    Mat_<uchar> t(grid,roi);
    if(t.size()!= patch.size())	return; //do not draw patches if they don't entirely fit on the map because it is inconvenient to do so
    //ROS_INFO("drawing a hit");	//don't print this out because it is slow
    if(ADDITIVE)	t+=patch;
    else		t= max(t, patch);
}
Example No. 19
/*
 * Function: scan
 * ----------------------------
 *   Scans a UPC code and returns the number
 *
 *   img: image containing the barcode
 *
 *   returns: the barcode as a number
 */
deque<int> BarcodeScannerEAN::scan(Mat_<uchar> img)
{

    Point currPos(0, img.size().height / 2);
    int unitWidth = scanFirstGuard(img, currPos);
    deque<int> codeNumbers;
    bool oddParity[6] = {0, 0, 0, 0, 0, 0};

    //Read the left-hand digit block
    for (int i = 0; i < DIGIT_COUNT; ++i)
    {
        int num = readDigit(img, currPos, unitWidth, LEFT);
        if (num <= 10)
        {
            oddParity[i] = 1;
        }
        codeNumbers.push_back(num % 10);
        correctReaderPos(img, currPos, SPACE, BAR);
    }

    //Check the left-hand block's parity encoding (this yields the leading digit)
    for (int i = 0; i < 10; i++)
    {
        if (matchingParityPattern(oddParity, i))
        {
            codeNumbers.push_front(i);
        }
    }

    //Skip the middle guard pattern
    for (int i = 0; i < 5; ++i)
    {
        skipPart(img, currPos);
    }

    //Read the right-hand digit block
    for (int i = 0; i < DIGIT_COUNT; ++i)
    {
        int num = readDigit(img, currPos, unitWidth, RIGHT);
        codeNumbers.push_back(num);
        correctReaderPos(img, currPos, BAR, SPACE);
    }

    return codeNumbers;
}
Example No. 20
File: main.cpp Project: KAlO2/Pea
Mat_<uchar> binaryThresholding(const Mat_<uchar>& image, const std::pair<double, double>& stats)
{
    Mat_<uchar> image_binary(image.size());

    double Z = 0.9;
    double threshold = stats.first + (Z * stats.second);
    for(int i = 0; i < image.rows; ++i)
    {
        for(int j = 0; j < image.cols; ++j)
        {
            if(image.at<uchar>(i, j) >= threshold + std::numeric_limits<double>::epsilon())
                image_binary.at<uchar>(i, j) = 255;
            else
                image_binary.at<uchar>(i, j) = 0;
        }
    }
    return image_binary;
}
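A usage sketch; the project's Stats() helper is not part of this listing, so cv::meanStdDev stands in for it here on the assumption that the pair holds (mean, standard deviation). The paths and the helper name demoBinaryThreshold are illustrative.

void demoBinaryThreshold()
{
    Mat_<uchar> gray = imread("input.png", IMREAD_GRAYSCALE);
    Scalar mu, sigma;
    meanStdDev(gray, mu, sigma);
    Mat_<uchar> bin = binaryThresholding(gray, std::make_pair(mu[0], sigma[0]));
    imwrite("binary.png", bin);
}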
static Mat endpointError( const Mat_<Point2f>& flow1, const Mat_<Point2f>& flow2 )
{
    Mat result(flow1.size(), CV_32FC1);
    for ( int i = 0; i < flow1.rows; ++i )
    {
        for ( int j = 0; j < flow1.cols; ++j )
        {
            const Point2f u1 = flow1(i, j);
            const Point2f u2 = flow2(i, j);

            if ( isFlowCorrect(u1) && isFlowCorrect(u2) )
            {
                const Point2f diff = u1 - u2;
                result.at<float>(i, j) = sqrt((float)diff.ddot(diff)); //distance
            } else
                result.at<float>(i, j) = std::numeric_limits<float>::quiet_NaN();
        }
    }
    return result;
}
static Mat angularError( const Mat_<Point2f>& flow1, const Mat_<Point2f>& flow2 )
{
    Mat result(flow1.size(), CV_32FC1);

    for ( int i = 0; i < flow1.rows; ++i )
    {
        for ( int j = 0; j < flow1.cols; ++j )
        {
            const Point2f u1_2d = flow1(i, j);
            const Point2f u2_2d = flow2(i, j);
            const Point3f u1(u1_2d.x, u1_2d.y, 1);
            const Point3f u2(u2_2d.x, u2_2d.y, 1);

            if ( isFlowCorrect(u1) && isFlowCorrect(u2) )
                result.at<float>(i, j) = acos((float)(u1.ddot(u2) / (norm(u1) * norm(u2))));
            else
                result.at<float>(i, j) = std::numeric_limits<float>::quiet_NaN();
        }
    }
    return result;
}
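An evaluation sketch for the two error measures above, assuming gtFlow and estFlow are Mat_<Point2f> fields of equal size; the NaN entries produced for invalid pixels are masked out via the fact that NaN compares unequal to itself. The helper name demoFlowError is illustrative.

void demoFlowError(const Mat_<Point2f>& gtFlow, const Mat_<Point2f>& estFlow)
{
    Mat epe = endpointError(gtFlow, estFlow);
    Mat ae  = angularError(gtFlow, estFlow);
    Mat validEpe = (epe == epe);               // 255 where finite, 0 where NaN
    Mat validAe  = (ae == ae);
    double meanEPE = mean(epe, validEpe)[0];
    double meanAE  = mean(ae, validAe)[0];
    cout << "mean EPE: " << meanEPE << "  mean AE: " << meanAE << endl;
}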
bool intersects(const vector<Point> &contour, const Mat_<uchar> &mask)
{
    Mat_<uchar> c = Mat_<uchar>::zeros(mask.size());
    fillConvexPoly(c, contour.data(), contour.size(), 255);

    /*
    Below is:

    bitwise_and(mask, c, c);
    return countNonZero(c);

    optimized.
    */

    auto it_m = mask.begin();
    auto it_c = c.begin();
    while (it_m != mask.end()) {
        if (*it_m && *it_c) return true;
        ++it_m, ++it_c;
    }

    return false;
}
Example No. 24
void getLaplacianPyramid(const Mat_<Vec3f>& image, vector<Mat_<Vec3f> >& laplacianPyramid,
                         int numLevels) {
    //laplacianPyramid.push_back(image);
    // Construct each level of the pyramid
    Mat_<Vec3f> curImage;
    //image.convertTo(curImage, CV_32FC3);
    curImage = image;
    for (int level = 0; level < numLevels; level++) {
        if (level < numLevels - 1) {
            Mat_<Vec3f> newLevel;
            Mat_<Vec3f> down_tmp, up_tmp;
            pyrDown(curImage, down_tmp);
            pyrUp(down_tmp, up_tmp, curImage.size());
            newLevel = curImage - up_tmp;
            laplacianPyramid.push_back(newLevel);
            curImage = down_tmp;
        } else {
            // Top level of laplacian pyramid is just a Gaussian-blurred
            // image (not difference of gaussian/laplacian)
            laplacianPyramid.push_back(curImage);
        }
    }
}
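A round-trip sketch for the pyramid above: collapsing the levels by repeated pyrUp-and-add should reproduce the input up to interpolation error. The input path, the choice of 4 levels and the helper name demoPyramidRoundTrip are illustrative.

void demoPyramidRoundTrip()
{
    Mat_<Vec3f> img;
    imread("input.jpg").convertTo(img, CV_32FC3, 1.0 / 255.0);
    vector<Mat_<Vec3f> > lap;
    getLaplacianPyramid(img, lap, 4);
    Mat_<Vec3f> rec = lap.back();               // coarsest level is a plain low-pass image
    for (int level = (int)lap.size() - 2; level >= 0; --level) {
        Mat_<Vec3f> up;
        pyrUp(rec, up, lap[level].size());
        rec = up + lap[level];                  // add back the detail stored at this level
    }
}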
Example No. 25
Mat optical_flow(const Mat_<float>& ImgA, const Mat_<float>& ImgB, 
		 int num_it, float threshold) {

	// Compute Gaussian pyramid
	int nl = 5;
	float ds = 0.5;
	stack<pair<Mat, Mat> > gp = compute_gaussian_pyramids(ImgA, ImgB, nl, ds);
	
	Mat_<float> u = Mat::zeros(ImgA.size(), CV_32F);
	Mat_<float> v = Mat::zeros(ImgA.size(), CV_32F);
	while (!gp.empty()) {
	    Mat imgA = (gp.top()).first;
	    Mat imgB = (gp.top()).second;
	    
	    // Warp the second image with the current flow estimate.
	    Mat_<float> imgBw = imgB.clone();
	   
	    /* Compute warping here from u and v. */
	    imgBw = compute_warp(imgB, u, v, ds);

	    /* Compute the derivatives. */
	    Mat_<float> Ix, Iy, It;
	    compute_derivatives(imgBw, imgA, Ix, Iy, It);
	    Mat_<float> du = Mat::zeros(imgA.size(), CV_32F);
	    Mat_<float> dv = Mat::zeros(imgA.size(), CV_32F);
	    for (int i = 0; i < 500; i ++) 
		  iterative_computation(du, dv, Ix, Iy, It);

	    u = u - du;
	    v = v - dv;
	    gp.pop();
	}
	
	Mat Mflow = color_map(u, v);
	return Mflow;
}
Example No. 26
	static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
	{
	    dst.create(flow.size(), CV_8UC3);
	    dst.setTo(Scalar::all(0));
	
	    // determine motion range:
	    float maxrad = maxmotion;
	
	    if (maxmotion <= 0)
	    {
	        maxrad = 1;
	        for (int y = 0; y < flow.rows; ++y)
	        {
	            for (int x = 0; x < flow.cols; ++x)
	            {
	                Point2f u = flow(y, x);
	
	                if (!isFlowCorrect(u))
	                    continue;
	
	                maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
	            }
	        }
	    }
	
	    for (int y = 0; y < flow.rows; ++y)
	    {
	        for (int x = 0; x < flow.cols; ++x)
	        {
	            Point2f u = flow(y, x);
	
	            if (isFlowCorrect(u))
	                dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
	        }
	    }
	}
Example No. 27
// We are implementing SLIC here. I'm too lazy to enforce the connectivity
Mat_< int > Superpixel::slic( const Mat_< Vec3f >& im ) const {
	// Compute the spacing and grid size of the superpixels
	double sp_area = 1.0 * im.cols * im.rows / K_;
	int Kx = 0.5 + im.cols / sqrt( sp_area ), Ky = 0.5 + im.rows / sqrt( sp_area );
	int K = Kx*Ky;
	
	int win_sz = 1.0 * sqrt(sp_area) + 1;
	
	// Initialize the seeds on a regular grid
	std::vector< int64_t > cnt( K );
	std::vector< Point2d > seedsd( K );
	std::vector< Point > seeds( K );
	for( int i=0,k=0; i<Kx; i++ )
		for( int j=0; j<Ky; j++, k++ )
			seeds[k] = Point( (i+0.5)*(im.cols-1)/Kx, (j+0.5)*(im.rows-1)/Ky );
	
	// Run k-means
	Mat_<float> dist( im.size() );
	Mat_<int> label( im.size() );
	for( int it=0; it<n_iter_; it++ ) {
		// Assignment step
		dist = std::numeric_limits<float>::max();
		label = -1;
		for( int k=0; k<K; k++ ) {
			Vec3f c = im( seeds[k] );
			for( int j=std::max(0,seeds[k].y-win_sz); j<im.rows && j<=seeds[k].y+win_sz; j++ )
				for( int i=std::max(0,seeds[k].x-win_sz); i<im.cols && i<=seeds[k].x+win_sz; i++ ){
					double d = (i-seeds[k].x) * (i-seeds[k].x) + (j-seeds[k].y) * (j-seeds[k].y);
					double cd = ( im( j, i ) - c ).dot( im( j, i ) - c );
					d += col_w_ * col_w_ * cd;
					if( d < dist( j, i ) ) {
						dist( j, i ) = d;
						label( j, i ) = k;
					}
				}
		}
		
		// Update
		for( int k=0; k<K; k++ ) {
			seedsd[k] = Point2d(0,0);
			cnt[k] = 0;
		}
		for( int j=0; j<im.rows; j++ )
			for( int i=0; i<im.cols; i++ ) {
				// Fix all the pixels we messed up!
				if ( label( j, i ) < 0 ) {
// 					printf("Oops that wasn't very slick :(\n");
					for( int k=0; k<K; k++ ){
						Vec3f c = im( seeds[k] );
						double d = (i-seeds[k].x) * (i-seeds[k].x) + (j-seeds[k].y) * (j-seeds[k].y);
						double cd = ( im( j, i ) - c ).dot( im( j, i ) - c );
						d += col_w_ * col_w_ * cd;
						if( d < dist( j, i ) ) {
							dist( j, i ) = d;
							label( j, i ) = k;
						}
					}
				}
				
				seedsd[ label( j, i ) ] += Point2d( i, j );
				cnt[ label( j, i ) ] += 1;
			}
		
		for( int k=0; k<K; k++ )
			if (cnt[k] > 0)
				seeds[k] = Point( 0.5 + seedsd[k].x / cnt[k], 0.5 + seedsd[k].y / cnt[k] );
	}
	return label;
}
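A usage sketch that leans on the commented-out construction in Example No. 16 (Superpixel superpixel_(300, 50.0)) and on segment() being the public entry point; both the constructor arguments and the call are assumptions taken from that example, and the input path and helper name demoSuperpixels are illustrative.

void demoSuperpixels()
{
    Mat_<Vec3f> rgbf, lab;
    imread("input.jpg").convertTo(rgbf, CV_32F, 1.0 / 255.0);
    cvtColor(rgbf, lab, CV_BGR2Lab);            // same conversion as in Example No. 16
    Superpixel sp(300, 50.0);                   // assumed constructor: (target K, color weight)
    Mat_<int> labels = sp.segment(lab);         // per-pixel superpixel ids
}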
Example No. 28
Mat_< int > Superpixel::geodesicSegmentation( const Mat_< Vec3f >& im ) const {
	//std::tr1::uniform_int<int> distribution(-2, 2);
	//std::tr1::mt19937 engine; // Mersenne twister MT19937
	//auto randint = std::tr1::bind(distribution, engine);
	int s;
	srand((unsigned)time(NULL));
	s=rand()%4-2;
	
	// Compute the spacing and grid size of the superpixels
	double sp_area = 1.0 * im.cols * im.rows / K_;
	int Kx = 0.5 + im.cols / sqrt( sp_area ), Ky = 0.5 + im.rows / sqrt( sp_area );
	int K = Kx*Ky;
	
	int win_sz = 1.0 * sqrt(sp_area) + 1;
	
	// Initialize the seeds on a regular grid
	std::vector< int64_t > cnt( K );
	std::vector< Point2d > seedsd( K );
	std::vector< Point > seeds( K );
	for( int i=0,k=0; i<Kx; i++ )
		for( int j=0; j<Ky; j++, k++ )
			seeds[k] = Point( (i+0.5)*(im.cols-1)/Kx, (j+0.5)*(im.rows-1)/Ky ) + Point(rand()%4-2,rand()%4-2);/*Point( randint(), randint() );*/
	
	Mat_<float> dx( im.size() ), dy( im.size() );
	
	for( int j=0; j<im.rows; j++ )
		for( int i=0; i<im.cols; i++ ) {
			if (i)
				dx(j,i-1) = col_w_*sqrt( (im(j,i)-im(j,i-1)).dot(im(j,i)-im(j,i-1)) ) + 1;
			if (j)
				dy(j-1,i) = col_w_*sqrt( (im(j-1,i)-im(j,i)).dot(im(j-1,i)-im(j,i)) ) + 1;
		}
	
	// Run k-means
	Mat_<float> dist( im.size() );
	Mat_<int> label( im.size() );
	for( int it=0; it<n_iter_; it++ ) {
		// Assignment step
		dist = std::numeric_limits<float>::max();
		label = -1;
		for( int k=0; k<K; k++ ) {
			dist( seeds[k] ) = 0;
			label( seeds[k] ) = k;
		}
		for( int IT=0; IT<2; IT++ ){
			for( int j=0; j<im.rows; j++ )
				for( int i=0; i<im.cols; i++ ) {
					if (i && dist(j,i-1) + dx(j,i-1) < dist(j,i)) {
						dist(j,i) = dist(j,i-1) + dx(j,i-1);
						label(j,i) = label(j,i-1);
					}
					if (j && dist(j-1,i) + dy(j-1,i) < dist(j,i)) {
						dist(j,i) = dist(j-1,i) + dy(j-1,i);
						label(j,i) = label(j-1,i);
					}
				}
			for( int j=im.rows-1; j>=0; j-- )
				for( int i=im.cols-1; i>=0; i-- ) {
					if (i && dist(j,i) + dx(j,i-1) < dist(j,i-1)) {
						dist(j,i-1) = dist(j,i) + dx(j,i-1);
						label(j,i-1) = label(j,i);
					}
					if (j && dist(j,i) + dy(j-1,i) < dist(j-1,i)) {
						dist(j-1,i) = dist(j,i) + dy(j-1,i);
						label(j-1,i) = label(j,i);
					}
				}
		}
		for( int k=0; k<K; k++ ) {
			Vec3f c = im( seeds[k] );
			for( int j=std::max(0,seeds[k].y-win_sz); j<im.rows && j<=seeds[k].y+win_sz; j++ )
				for( int i=std::max(0,seeds[k].x-win_sz); i<im.cols && i<=seeds[k].x+win_sz; i++ ){
					double d = (i-seeds[k].x) * (i-seeds[k].x) + (j-seeds[k].y) * (j-seeds[k].y);
					double cd = ( im( j, i ) - c ).dot( im( j, i ) - c );
					d += col_w_ * col_w_ * cd;
					if( d < dist( j, i ) ) {
						dist( j, i ) = d;
						label( j, i ) = k;
					}
				}
		}
		
		// Update
		for( int k=0; k<K; k++ ) {
			seedsd[k] = Point2d(0,0);
			cnt[k] = 0;
		}
		for( int j=0; j<im.rows; j++ )
			for( int i=0; i<im.cols; i++ ) {
				// Fix all the pixels we messed up!
				if ( label( j, i ) < 0 ) {
// 					printf("Oops that wasn't very slick :(\n");
					for( int k=0; k<K; k++ ){
						Vec3f c = im( seeds[k] );
						double d = (i-seeds[k].x) * (i-seeds[k].x) + (j-seeds[k].y) * (j-seeds[k].y);
						double cd = ( im( j, i ) - c ).dot( im( j, i ) - c );
						d += col_w_ * col_w_ * cd;
						if( d < dist( j, i ) ) {
							dist( j, i ) = d;
							label( j, i ) = k;
						}
					}
				}
				
				seedsd[ label( j, i ) ] += Point2d( i, j );
				cnt[ label( j, i ) ] += 1;
			}
		
		for( int k=0; k<K; k++ )
			if (cnt[k] > 0)
				seeds[k] = Point( 0.5 + seedsd[k].x / cnt[k], 0.5 + seedsd[k].y / cnt[k] );
	}
	return label;
}
Example No. 29
int main(int argc, char** argv)
{	
    /*Polynomial2 poly2;
    poly2.kuu = -1; 
    poly2.kuv = 1; 
    poly2.kvv= -1; 
    poly2.ku = 0.25; 
    poly2.kv = 0.25; 
    poly2.k1 = 5;
    
    CurveRasterizer<Polynomial2> raster(1, 1, -100, 100, poly2);
    CurveRasterizer2<Polynomial2> raster2(1, 1, -100, 100, poly2);

    auto tr0 = clock();
    int x1 = 0;
    int x2 = 0;
    for (int i = 0; i < 10000000; i++)
    {
        raster.step();
        x1 += raster.x;
    }
    auto tr1 = clock();
    
    for (int i = 0; i < 10000000; i++)
    {
        raster2.step();
        x2 += raster2.x;
    }
    auto tr2 = clock();
    
    cout << "optimized " << double(tr1 - tr0) / CLOCKS_PER_SEC << endl;
    cout << "simple " << double(tr2 - tr1) / CLOCKS_PER_SEC << endl;
    cout << x1 << " " << x2 << endl;
    return 0;*/
    ifstream paramFile(argv[1]);
    if (not paramFile.is_open())
    {
        cout << argv[1] << " : ERROR, file is not found" << endl;
        return 0;
    }
    
    array<double, 6> params;
    
    cout << "EU Camera model parameters :" << endl;
    for (auto & p: params) 
    {
        paramFile >> p;
        cout << setw(15) << p;
    }
    cout << endl;
    paramFile.ignore();
    
    array<double, 6> cameraPose;
    cout << "Camera pose wrt the robot :" << endl;
    for (auto & e: cameraPose) 
    {
        paramFile >> e;
        cout << setw(15) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> TbaseCamera(cameraPose.data());
    
    array<double, 6> planePose;
    cout << "Plane pose :" << endl;
    for (auto & e: planePose)
    {
        paramFile >> e;
        cout << setw(15) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> TbasePlane(planePose.data());
    
    StereoParameters stereoParams;
    paramFile >> stereoParams.u0;
    paramFile >> stereoParams.v0;
    paramFile >> stereoParams.disparityMax;
    paramFile >> stereoParams.blockSize;
    paramFile.ignore();
    
    string imageDir;
    getline(paramFile, imageDir);
    
    string imageInfo, imageName;
    array<double, 6> robotPose1, robotPose2;
    getline(paramFile, imageInfo);
    istringstream imageStream(imageInfo);
    imageStream >> imageName;
    for (auto & x : robotPose1) imageStream >> x;

    Mat8 img1 = imread(imageDir + imageName, 0);
    int counter = 2;
    while (getline(paramFile, imageInfo))
    {
        istringstream imageStream(imageInfo);
        
        imageStream >> imageName;
        for (auto & x : robotPose2) imageStream >> x;
    
        Transformation<double> T01(robotPose1.data()), T02(robotPose2.data());
        Transformation<double> TleftRight = T01.compose(TbaseCamera).inverseCompose(T02.compose(TbaseCamera));
        
        
        Mat8 img2 = imread(imageDir + imageName, 0);

        EnhancedStereo stereo(TleftRight, img1.cols, img1.rows, params.data(), params.data(), stereoParams);

        cv::Mat_<uint8_t> res;
        auto t2 = clock();
        stereo.comuteStereo(img1, img2, res);
        auto t3 = clock();
//        cout << double(t3 - t2) / CLOCKS_PER_SEC << endl;
        Mat_<float> distMat;
        Mat_<float> planeMat;
        
        stereo.computeDistance(distMat);
        Transformation<double> T0Camera = T01.compose(TbaseCamera);
        stereo.generatePlane(T0Camera.inverseCompose(TbasePlane), planeMat,
         vector<Vector3d>{Vector3d(-0.1, -0.1, 0), Vector3d(-0.1 + 3 * 0.45, -0.1, 0),
                          Vector3d(-0.1 + 3 * 0.45, 0.5, 0), Vector3d(-0.1, 0.5, 0) } );
        imshow("dist" + to_string(counter) , distMat);
        imshow("plane" , planeMat);
        imwrite("/home/bogdan/projects/plane.png", planeMat);
        double err = 0;
        double err2 = 0;
        double dist = 0;
        int N = 0;
        int Nmax = 0;
        Mat_<float> inlierMat(planeMat.size());
        inlierMat.setTo(0);
        for (int u = 0; u < distMat.cols; u++)
        {
            for (int v = 0; v < distMat.rows; v++)
            {
                
                if (planeMat(v, u) == 0) continue;
                Nmax++;
                dist += planeMat(v, u);
                inlierMat(v, u) = 1;
                if (distMat(v, u) == 0 or distMat(v, u) != distMat(v, u) or planeMat(v, u) != planeMat(v, u)) continue;
                if (abs(distMat(v, u) - planeMat(v, u)) > 0.10) continue;
                inlierMat(v, u) = 0;
                err += distMat(v, u) - planeMat(v, u);
                err2 += pow(distMat(v, u) - planeMat(v, u), 2);
                N++;
            }
        }
//        cout << (counter - 1) * 7 << " & " << dist/ Nmax * 1000 << " & " << err / N *1000 << " & " << sqrt(err2 / N)*1000  
//                << " & " << 100 * N / double(Nmax) << "\\\\" << endl << "\\hline" << endl;
        cout << "avg err : " << err / N *1000 << " avg err2 : " 
<< sqrt(err2 / N)*1000  << " number of inliers : " << 100 * N / double(Nmax) << endl;
        imshow("diff" + to_string(counter), abs(planeMat - distMat));
        imshow("inliers" + to_string(counter), inlierMat);
        counter++;
        
    }
    waitKey();
    return 0;
}
bool c_FourierTransfrom::ifftw_complex_3d(const Mat_<Vec6d> &_input,
                                         Mat_<Vec6d> &_output)
{
    size_t height = _input.rows;
    size_t width = _input.cols;
    size_t n_channels = _input.channels() / 2;
    size_t n_pixels = height * width;
    size_t n_data = n_pixels * n_channels;

    fftw_complex *in, *out;
    fftw_plan p;

    in = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n_data);
    out = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n_data);

    p = fftw_plan_dft_3d(height, width, n_channels, in, out, FFTW_BACKWARD,
                         FFTW_ESTIMATE);

    /*!< prepare the data */
    for (size_t i_row = 0; i_row < height; ++i_row)
    {
        const Vec6d *p = _input.ptr<Vec6d>(i_row);
        for (size_t i_col = 0; i_col < width; ++i_col)
        {
            size_t index = i_row * width + i_col;
            for (size_t k = 0; k < n_channels; ++k)
            {
                in[n_pixels * k + index][0] = p[i_col][k];
                in[n_pixels * k + index][1] = p[i_col][k + n_channels];
            }
#if 0
            in[index][0] = p[i_col][4];
            in[index][1] = p[i_col][5];
            in[n_pixels + index][0] = p[i_col][2];
            in[n_pixels + index][1] = p[i_col][3];
            in[n_pixels * 2 + index][0] = p[i_col][0];
            in[n_pixels * 2 + index][1] = p[i_col][1];
#endif
        }
    }

    fftw_execute(p);

    /*!< write back data */
    _output = Mat_<Vec6d>::zeros(_input.size());
    for (size_t i_row = 0; i_row < height; ++i_row)
    {
        Vec6d *p = _output.ptr<Vec6d>(i_row);
        for (size_t i_col = 0; i_col < width; ++i_col)
        {
            size_t index = i_row * width + i_col;
            for (size_t k = 0; k < n_channels; ++k)
            {
                p[i_col][k] = out[n_pixels * k + index][0];
                p[i_col][k + n_channels] = out[n_pixels * k + index][1];
            }
#if 0
            p[i_col][0] = out[n_pixels * 2 + index][0];
            p[i_col][1] = out[n_pixels + index][0];
            p[i_col][2] = out[index][0];
            p[i_col][3] = out[n_pixels * 2 + index][1];
            p[i_col][4] = out[n_pixels + index][1];
            p[i_col][5] = out[index][1];
#endif
        }
    }

    _output /= n_data;

    fftw_destroy_plan(p);
    fftw_free(in);
    fftw_free(out);

    return true;
}