Example #1
static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2,
                                      const Mat& src3, const Mat& src4,
                                      Mat& dst)
{

    CV_Assert(src1.size() == src2.size());
    CV_Assert(src1.size() == src3.size());
    CV_Assert(src1.size() == src4.size());

    CV_Assert(src1.rows == dst.rows);
    CV_Assert(dst.cols == (6*src1.cols));

    CV_Assert(dst.type() == CV_32FC1);


    const int w = src1.cols;

    //compute Jacobian blocks (6 blocks)

    dst.colRange(0,w) = src1.mul(src3);//1
    dst.colRange(w,2*w) = src2.mul(src3);//2
    dst.colRange(2*w,3*w) = src1.mul(src4);//3
    dst.colRange(3*w,4*w) = src2.mul(src4);//4
    src1.copyTo(dst.colRange(4*w,5*w));//5
    src2.copyTo(dst.colRange(5*w,6*w));//6
}
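For orientation, here is a hedged usage sketch (not from the original source): in the ECC algorithm these inputs are the warped image gradients (src1 = dI/dx, src2 = dI/dy) and the pixel coordinate grids (src3 = X, src4 = Y), all CV_32FC1 and the same size. The driver below is hypothetical and assumes it sits in the same translation unit, since the function is static.

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical driver for image_jacobian_affine_ECC (names and layout assumed).
static void demoAffineJacobian(const Mat& gradX, const Mat& gradY /* CV_32FC1 */)
{
    const int h = gradX.rows, w = gradX.cols;
    Mat X(h, w, CV_32FC1), Y(h, w, CV_32FC1);
    for (int r = 0; r < h; r++)
        for (int c = 0; c < w; c++) {
            X.at<float>(r, c) = (float)c;   // x coordinate of each pixel
            Y.at<float>(r, c) = (float)r;   // y coordinate of each pixel
        }
    Mat jac(h, 6 * w, CV_32FC1);            // six w-column blocks, one per affine parameter
    image_jacobian_affine_ECC(gradX, gradY, X, Y, jac);
}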
Example #2
static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2,
                                         const Mat& src3, const Mat& src4,
                                         const Mat& src5, Mat& dst)
{

    CV_Assert( src1.size()==src2.size());
    CV_Assert( src1.size()==src3.size());
    CV_Assert( src1.size()==src4.size());

    CV_Assert( src1.rows == dst.rows);
    CV_Assert(dst.cols == (src1.cols*3));
    CV_Assert(dst.type() == CV_32FC1);

    CV_Assert(src5.isContinuous());

    const float* hptr = src5.ptr<float>(0);

    const float h0 = hptr[0];//cos(theta)
    const float h1 = hptr[3];//sin(theta)

    const int w = src1.cols;

    //create -sin(theta)*X -cos(theta)*Y for all points as a block -> hatX
    Mat hatX = -(src3*h1) - (src4*h0);

    //create cos(theta)*X -sin(theta)*Y for all points as a block -> hatY
    Mat hatY = (src3*h0) - (src4*h1);


    //compute Jacobian blocks (3 blocks)
    dst.colRange(0, w) = (src1.mul(hatX))+(src2.mul(hatY));//1

    src1.copyTo(dst.colRange(w, 2*w));//2
    src2.copyTo(dst.colRange(2*w, 3*w));//3
}
Example #3
Mat get_ft(Mat &src1, Mat &src2){
    // temporal gradient: convolve frame 1 with -1s and frame 2 with +1s, then sum
    Mat ft;
    Mat kernel = -Mat::ones(2, 2, CV_64FC1);
    
    Mat dst1, dst2;
    filter2D(src1, dst1, -1, kernel);
    kernel = -kernel;                 // flip the sign back to +1 for the second frame
    filter2D(src2, dst2, -1, kernel);
    
    ft = dst1 + dst2;
    return ft;
}
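A hedged usage sketch (hypothetical caller, not in the original): the 2x2 kernels have a negative lobe, so inputs should first be converted to a signed or floating type to avoid saturation clipping.

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical caller: temporal gradient between two consecutive grayscale frames.
Mat temporalGradient(const Mat& frame1, const Mat& frame2)
{
    Mat f1, f2;
    frame1.convertTo(f1, CV_64F);
    frame2.convertTo(f2, CV_64F);
    return get_ft(f1, f2);   // roughly I(t+1) - I(t), averaged over a 2x2 window
}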
Example #4
	void blendLapPyrs() {
		//build resultLapPyr: at each pyramid level, blend the left/right images' Laplacian levels directly
		resultHighestLevel = leftHighestLevel.mul(maskGaussianPyramid.back()) +
			rightHighestLevel.mul(Scalar(1.0,1.0,1.0) - maskGaussianPyramid.back());
		for (int l=0; l<levels; l++) {
			Mat A = leftLapPyr[l].mul(maskGaussianPyramid[l]);
			Mat antiMask = Scalar(1.0,1.0,1.0) - maskGaussianPyramid[l];
			Mat B = rightLapPyr[l].mul(antiMask);
			Mat_<Vec3f> blendedLevel = A + B;

			resultLapPyr.push_back(blendedLevel);
		}
	}
Example #5
	// ----------------------------------------------------------------------------
	//
	//								private function impl
	//
	// ----------------------------------------------------------------------------
	void FCDropoutLayer::fpropOne(Mat &ouFeatMaps, Mat &mask, 
								  const Mat &inFeatMaps, 
								  const float dropoutRate,
								  const bool isStaticMask)
	{
		// inverted dropout: surviving units are scaled by 1/(1 - p) at training time
		float scale = 1 / (1 - dropoutRate);
		if (!isStaticMask) {
			cv::randu(mask, 0, 1);
			cv::threshold(mask, mask, dropoutRate, scale, CV_THRESH_BINARY);
		}
		ouFeatMaps = inFeatMaps.mul(mask);
	}
Example #6
Mat cvt2NonLinear(Mat srcImg)  {
	int choice;
	double c;
	cout << "\nNON-LINEAR COLOR TRANSFORMATION" << endl;
	cout << "Choose one of the following two options: " << endl;
	cout << "   1. Transform with the logarithm function." << endl;
	cout << "   2. Transform with the exponential function." << endl;
	cout << "Choice: ";
	cin >> choice;

	switch (choice)  {
	case 1:  {
		cout << "   - Enter the value of c: ";
		cin >> c;
		myfunc_nonLinearChange_Log(srcImg, c);
		// create the result image (res) with the same size and type as the source (srcImg), filled with zeros
		Mat res = Mat::zeros(srcImg.size(), srcImg.type());
		// +1 to avoid log(0)
		srcImg = srcImg + 1;
		// convert the source image (srcImg) to CV_32F (float) so the logarithm can be computed
		srcImg.convertTo(srcImg, CV_32F);
		// result = log(source)
		log(srcImg, res);
		// rescale to [0..255]
		normalize(res, res, 0, 255, NORM_MINMAX);
		convertScaleAbs(res, res);
		// result = result * c (mul() returns its result; it does not modify res in place)
		res = res * c;
		return res;
	}
	case 2:  {
		cout << "   - Enter the value of c: ";
		cin >> c;
		myfunc_nonLinearChange_Exp(srcImg, c);
		// create the result image (res) with the same size and type as the source (srcImg), filled with zeros
		Mat res = Mat::zeros(srcImg.size(), srcImg.type());
		// convert the source image (srcImg) to CV_32F (float) so the exponential can be computed
		srcImg.convertTo(srcImg, CV_32F);
		// result = e ^ (source)
		exp(srcImg, res);
		// result = result * c
		res = res * c;
		return res;
	}
	default:
		return Mat();
	}
}
Example #7
/* finds the convolution of a matrix and kernel (strictly, cross-correlation,
 * since the kernel is not flipped, matching cv::filter2D's convention);
 * the input should be a 1-channel floating point image,
 * written as an illustration only */
int naive_convolution(const Mat& input, Mat& output, const Mat& kernel)
{
    int krows = kernel.rows;
    int kcols = kernel.cols;

    /* region of interest (cv::Rect takes x, y, width, height) */
    Mat roi = input(cv::Rect(0, 0, kcols, krows));

    for (int r = 0; r < output.rows; r++) {
        for (int c = 0; c < output.cols; c++) {
            float& pixel = output.at<float>(r, c);

            if ((r < krows / 2) || (r > output.rows - krows / 2 - 1)
             || (c < kcols / 2) || (c > output.cols - kcols / 2 - 1)) {
                /* set out of bounds pixels to black */
                pixel = 0;
            } else {
                /* move the ROI */
                roi = input(cv::Rect(c - kcols / 2, r - krows / 2,
                                     kcols, krows));

                /* perform element-wise multiplication and take the sum
                 * to get the value of the current pixel */
                pixel = sum(kernel.mul(roi))[0];
            }
        }
    }
    return 0;
}
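One way to sanity-check this routine, sketched under the assumption that the helper below lives in the same file (it is hypothetical, not from the original): interior pixels should agree with cv::filter2D, which uses the same unflipped-kernel convention.

#include <opencv2/opencv.hpp>
#include <cassert>
using namespace cv;

void checkAgainstFilter2D(const Mat& img /* CV_32FC1 */)
{
    Mat kernel = (Mat_<float>(3, 3) << 1, 0, -1,
                                       2, 0, -2,
                                       1, 0, -1);
    Mat naive(img.size(), CV_32FC1), ref;
    naive_convolution(img, naive, kernel);
    filter2D(img, ref, CV_32F, kernel);
    // compare interiors only: naive_convolution zeroes its border pixels
    Rect interior(1, 1, img.cols - 2, img.rows - 2);
    assert(norm(naive(interior), ref(interior), NORM_INF) < 1e-4);
}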
Example #8
void FiltreElip::goFiltre(Mat& img, Mat& gray, Mat& effet){

	Mat work_on;
	if(_image_we_work_on==0){
		equalizeHist( gray, work_on );
	}
	else{
		equalizeHist( effet, work_on );
	}
	int rows=work_on.rows;
	int cols=work_on.cols;
//	std::cout << rows << " rows" << std::endl;
//	std::cout << cols << " cols" << std::endl;
	double trying=0;

	for(int i=0;i<rows-_filtre1.rows;i=i+5){
		for(int j=0;j<cols-_filtre1.cols;j=j+5){
			Mat tmp = work_on(Range(i,i+_filtre1.rows), Range(j,j+_filtre1.cols));

			tmp = tmp.mul(_filtre1);	// mul() returns the product; it does not modify tmp in place
			trying=sum(tmp).val[0];

			if(_sum1>trying || _sum1==-1){
			//std::cout<<"sum : "<<_sum1 <<" "<< trying << std::endl;
				_sum1=trying;
				_rec1=Rect(j,i,_filtre1.cols,_filtre1.rows);	// Rect takes (x, y, width, height)
			}
		}
	}
}
Example #9
void TileHolder::makeTile(const GridPt &tilePt, ImgFetcher &fetch, Mat &out, Mat &bg) {
	Point2f tileTl(tl.x + tilePt[0] * tileSz.width, tl.y + tilePt[1] * tileSz.height);

	const set<ImData> &bin = getBin(tilePt);
	out = Mat::zeros(tileSz, CV_32FC1);//gather the tile data in out
	
	Mat weightIm = Mat::zeros(tileSz, CV_32FC1) + FLT_MIN;//+ 1e-90;	// avoid divide by zero, maybe static this?

	Mat curIm;
	for (const ImData &dat : bin) {
		fetch.getMat(dat.file, curIm);
		if (curIm.type() == CV_8U) {
			curIm.convertTo(curIm, CV_32F, 1. / 255);
		} else if (curIm.type() != CV_32F) {
			fprintf(stderr, "err: bad image type\n");
			exit(1);
		}

		if (bg.data)
		{
			assert(bg.size() == curIm.size());
			curIm -= bg;
		}

		std::cout << dat.file << std::endl;
		//saveFloatIm("testsomewhere.tif", weightMat);
		blit(curIm.mul(weightMat), out, dat.pos - tileTl);
		blit(weightMat, weightIm, dat.pos - tileTl);
	}
	out /= weightIm;
}
Example #10
  /**
   * Get the average image at the given altitude.
   * If the altitude is not located exactly at one of the altitude bins,
   * the average image is interpolated between any overlapping bins.
   * @param _dst the output image (zeros at desired resolution)
   * @param alt the altitude the image was taken at
   * @param pitch the pitch of the vehicle
   * @param roll the roll of the vehicle
   */
  void getAverage(cv::OutputArray _dst, double alt, double pitch, double roll) {
    Mat dst = _dst.getMat();
    int width = dst.size().width;
    int height = dst.size().height;
    Mat D = Mat::zeros(height, width, CV_32F);
    double width_m = width * cameras.pixel_sep;
    double height_m = height * cameras.pixel_sep;
    interp::distance_map(D, alt, pitch, roll, width_m, height_m, cameras.focal_length);
    // now discretize into slices
    int i = 0;
    Mat W = Mat::zeros(D.size(), CV_32F);
    while(cv::countNonZero(W) == 0) {
      interp::dist_weight(D, W, alt_step, i++);
    }
    while(cv::countNonZero(W) > 0) {
      Slice<int>* slice = getSlice(i);
      Mat sAverage;
      // get the slice average
      boost::mutex* mutex = slice->get_mutex();
      { // protect slice with mutex to prevent interleaved read/write operations
        boost::lock_guard<boost::mutex> lock(*mutex);
        sAverage = slice->getLightfield()->getAverage();
      }
      dst += sAverage.mul(W); // multiply by slice weight
      interp::dist_weight(D, W, alt_step, i++);
    }
  }
Example #11
    void process(InputArray _src, OutputArray _dst)
    {
        Mat src = _src.getMat();
        CV_Assert(!src.empty());
        _dst.create(src.size(), CV_32FC3);
        Mat img = _dst.getMat();

        Ptr<Tonemap> linear = createTonemap(1.0f);
        linear->process(src, img);

        Mat gray_img;
        cvtColor(img, gray_img, COLOR_RGB2GRAY);
        Mat log_img;
        log(gray_img, log_img);
        float mean = expf(static_cast<float>(sum(log_img)[0]) / log_img.total());
        gray_img /= mean;
        log_img.release();

        double max;
        minMaxLoc(gray_img, NULL, &max);

        Mat map;
        log(gray_img + 1.0f, map);
        Mat div;
        pow(gray_img / static_cast<float>(max), logf(bias) / logf(0.5f), div);
        log(2.0f + 8.0f * div, div);
        map = map.mul(1.0f / div);
        div.release();

        mapLuminance(img, img, gray_img, map, saturation);

        linear->setGamma(gamma);
        linear->process(img, img);
    }
Example #12
void ImageCB(const sensor_msgs::ImageConstPtr& msg) {

	cv_bridge::CvImagePtr cv_ptr;
	Mat frame;
	Mat output;

	try {
		cv_ptr = cv_bridge::toCvCopy(msg, "bgr8");
	} catch (cv_bridge::Exception& e) {
		ROS_ERROR("CV-Bridge error: %s", e.what());
		return;
	}

	frame = cv_ptr->image;

	frame = frame.mul(mask);

	Mat white_lines = findWhiteLines(frame);

	Mat blue_lines = findBlueLines(frame);

	Mat yellow_lines = findYellowLines(frame);

	Mat orange_blobs = findOrange(frame);

	output = white_lines + blue_lines + yellow_lines + orange_blobs;

	sensor_msgs::Image outmsg;

	cv_ptr->image = output;
	cv_ptr->encoding = "bgr8";
	cv_ptr->toImageMsg(outmsg);
	img_pub.publish(outmsg);
}
Example #13
// Derivative of the logistic sigmoid: given mat = s(x), returns s(x) * (1 - s(x)) elementwise.
Mat dSigmoid(Mat& mat)
{
	Mat res = 1.0 - mat;
	res = res.mul(mat);
	return res;
}
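For context, a hedged sketch of how such a derivative is typically used in backpropagation (hypothetical helper; activations is assumed to already hold s(x)):

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical backprop step: dL/dz = dL/da .* s(x) .* (1 - s(x)).
Mat backpropThroughSigmoid(Mat& activations, const Mat& upstreamGrad)
{
    return upstreamGrad.mul(dSigmoid(activations));
}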
Example #14
 void signedPow(Mat src, float power, Mat& dst)
 {
     Mat sign = (src > 0);
     sign.convertTo(sign, CV_32F, 1.0f/255.0f);
     sign = sign * 2.0f - 1.0f;
     pow(abs(src), power, dst);
     dst = dst.mul(sign);
 }
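A small worked example of the behavior, as a hedged sketch: signedPow applies |x|^p and restores the sign, so a square root maps -4 to -2.

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;

int main()
{
    Mat x = (Mat_<float>(1, 4) << -4.f, -1.f, 1.f, 9.f);
    Mat y;
    signedPow(x, 0.5f, y);
    // expected output: -2 -1 1 3
    std::printf("%g %g %g %g\n", y.at<float>(0), y.at<float>(1),
                y.at<float>(2), y.at<float>(3));
    return 0;
}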
Example #15
void specgram::specgram_to_cepsgram(const Mat& specgram,
                                    const Mat& filter_bank,
                                    Mat& cepsgram)
{
    cepsgram = (specgram.mul(specgram) / specgram.cols) * filter_bank;
    cv::log(cepsgram, cepsgram);
    return;
}
Example #16
Mat PhaseShiftCoefficientRealPart(Mat laplevel, Mat rieszXLevel, Mat rieszYLevel, Mat phaseMagnifiedCos, Mat phaseMagnifiedSin)
{
    Mat r;
    Mat pm,expPhaseReal,expPhaseImag;
    Mat expphix,expphiy,tmp;

    sqrt(phaseMagnifiedCos.mul(phaseMagnifiedCos)+phaseMagnifiedSin.mul(phaseMagnifiedSin),pm);
    expPhaseReal = Cos(pm);
    expPhaseImag = Sin(pm);
    divide(expPhaseImag,pm,tmp);
    expphix = phaseMagnifiedCos.mul(tmp);
    expphiy = phaseMagnifiedSin.mul(tmp);

    r = expPhaseReal.mul(laplevel)-expphix.mul(rieszXLevel)-expphiy.mul(rieszYLevel);

    return r;
}
Example #17
// These can all be static
void specgram::wav_to_specgram(const Mat& wav_col_mat,
                               const int frame_length_tn,
                               const int frame_stride_tn,
                               const int max_time_steps,
                               const Mat& window,
                               Mat& specgram)
{
    // const Mat& wav_mat = wav.get_data();
    // Read as a row vector
    Mat wav_mat = wav_col_mat.reshape(1, 1);

    // TODO: support more sample formats
    if (wav_mat.elemSize1() != 2) {
        throw std::runtime_error(
                "Unsupported number of bytes per sample: " + std::to_string(wav_mat.elemSize1()));
    }

    // Go from time domain to strided signal
    Mat wav_frames;
    {
        int num_frames = ((wav_mat.cols - frame_length_tn) / frame_stride_tn) + 1;
        num_frames = std::min(num_frames, max_time_steps);
        // ensure the signal is long enough that the frame count is not negative
        assert(num_frames >= 0);

        wav_frames.create(num_frames, frame_length_tn, wav_mat.type());
        for (int frame = 0; frame < num_frames; frame++) {
            int start = frame * frame_stride_tn;
            int end   = start + frame_length_tn;
            wav_mat.colRange(start, end).copyTo(wav_frames.row(frame));
        }
    }

    // Prepare for DFT by converting to float
    Mat input;
    wav_frames.convertTo(input, CV_32FC1);

    // Apply window if it has been created
    if (window.cols == frame_length_tn) {
        input = input.mul(cv::repeat(window, input.rows, 1));
    }

    Mat planes[] = {input, Mat::zeros(input.size(), CV_32FC1)};
    Mat compx;
    cv::merge(planes, 2, compx);
    cv::dft(compx, compx, cv::DFT_ROWS);
    compx = compx(Range::all(), Range(0, frame_length_tn / 2  + 1));

    cv::split(compx, planes);
    cv::magnitude(planes[0], planes[1], planes[0]);

    // NOTE: at this point we are returning the specgram representation in
    // (time_steps, freq_steps) shape order.

    specgram = planes[0];

    return;
}
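A hedged driver sketch (hypothetical names, and it assumes wav_to_specgram is reachable as a static member of specgram, per the "These can all be static" note): build a Hann window of the frame length and pass 16-bit PCM samples, since the routine rejects other sample widths.

#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

// Hypothetical caller for specgram::wav_to_specgram.
void specgramDemo(const Mat& pcm16 /* CV_16SC1 column vector */, Mat& out)
{
    const int frame_len = 512, frame_stride = 160, max_steps = 1000;
    Mat window(1, frame_len, CV_32FC1);
    for (int i = 0; i < frame_len; i++)   // Hann window
        window.at<float>(i) = (float)(0.5 - 0.5 * std::cos(2.0 * CV_PI * i / (frame_len - 1)));
    specgram::wav_to_specgram(pcm16, frame_len, frame_stride, max_steps, window, out);
}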
Example #18
//Rate a location on how likely it is to be a bubble
double rateBubble(Mat& det_img_gray, Point bubble_location, PCA& my_PCA){
	Mat query_pixels, pca_components;
	getRectSubPix(det_img_gray, Point(14,18), bubble_location, query_pixels);
	query_pixels.reshape(0,1).convertTo(query_pixels, CV_32F);
	pca_components = my_PCA.project(query_pixels);
	//The rating is the SSD of query pixels and their back projection
	Mat out = my_PCA.backProject(pca_components)- query_pixels;
	return sum(out.mul(out)).val[0];
}
Example #19
/* Removes yellow lines from the image by minimizing the edges when 
 calculating green*(1+lambda)-blue*lambda.
 */
void removeyellow(Mat& img) {
	vector<Mat> bgr;
	split(img, bgr);

	Mat g = bgr[1]-1.0;
	Mat b = bgr[0]-1.0;

	double lambda = mean(g.mul(b-g))[0]/mean(g.mul(g)-2*g.mul(b)+b.mul(b))[0];

	LOGI("Remove yellow lambda = %f", lambda);
	// REMOVED BECAUSE NOT NEEDED ON IDEOS and lambda=0.53
	//bgr[1]=bgr[1]*(1+lambda)-bgr[0]*lambda;

	// Also reset blue
	bgr[0]=bgr[1];

	merge(bgr, img);
}
Example #20
double ParticleTrackingAlg::calcDistance(const Mat &histOrig, const Mat &histTmp)
{
	// Bhattacharyya coefficient: sum over bins of sqrt(h1 * h2)
	double distanceSum = 0.0;
	//std::cout<<"========="<<std::endl<<histOrig<<std::endl<<histTmp<<std::endl;//
	Mat result = histOrig.mul(histTmp);
	sqrt(result, result);
	distanceSum = sum(result).val[0];
	return distanceSum;
}
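This coefficient is 1 for identical unit-sum histograms and near 0 for disjoint ones. As a hedged sketch of how it relates to OpenCV's built-in metric (for histograms normalized to unit sum, HISTCMP_BHATTACHARYYA should work out to sqrt(1 - coefficient); the helper below is hypothetical):

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>
using namespace cv;

// Hypothetical helper: turn the coefficient above into a distance in [0, 1].
double bhattacharyyaDistance(const Mat& h1, const Mat& h2 /* unit-sum, CV_32F */)
{
    Mat prod = h1.mul(h2);
    sqrt(prod, prod);
    double coeff = sum(prod)[0];                  // what calcDistance computes
    return std::sqrt(std::max(0.0, 1.0 - coeff));
}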
Example #21
	void coherenceEnhancingShockFilter(cv::InputArray src, cv::OutputArray dest_, const int sigma, const int str_sigma_, const double blend, const int iter)
	{
		Mat dest = src.getMat().clone();	// work on a copy so the input buffer is left untouched
		const int str_sigma = min(31, str_sigma_);

		for (int i = 0; i < iter; i++)
		{
			Mat gray;
			if (src.channels() == 3) cvtColor(dest, gray, CV_BGR2GRAY);
			else gray = dest;

			Mat eigen;
			if (gray.type() == CV_8U || gray.type() == CV_32F || gray.type() == CV_64F)
				cornerEigenValsAndVecs(gray, eigen, str_sigma, 3);
			else
			{
				Mat grayf; gray.convertTo(grayf, CV_32F);
				cornerEigenValsAndVecs(grayf, eigen, str_sigma, 3);
			}

			vector<Mat> esplit(6);
			split(eigen, esplit);
			Mat x = esplit[2];
			Mat y = esplit[3];
			Mat gxx;
			Mat gyy;
			Mat gxy;
			Sobel(gray, gxx, CV_32F, 2, 0, sigma);
			Sobel(gray, gyy, CV_32F, 0, 2, sigma);
			Sobel(gray, gxy, CV_32F, 1, 1, sigma);

			Mat gvv = x.mul(x).mul(gxx) + 2 * x.mul(y).mul(gxy) + y.mul(y).mul(gyy);

			Mat mask;
			compare(gvv, 0, mask, cv::CMP_LT);

			Mat di, ero;
			erode(dest, ero, Mat());
			dilate(dest, di, Mat());
			di.copyTo(ero, mask);
			addWeighted(dest, blend, ero, 1.0 - blend, 0.0, dest);
		}
		dest.copyTo(dest_);
	}
Example #22
Mat Camera::computePMatrix() {
    Mat AR = computeCalibrationMatrix() * computeRotationMatrix();
    printMat("A", computeCalibrationMatrix());
    printMat("R", computeRotationMatrix());
    Mat ARC = AR * computeCameraCenterMatrix(false);
    Mat p;
    hconcat(AR, Mat(-ARC), p);   // negate elementwise; Mat::mul() does not accept a scalar
    printMat("P", p);
    return p;
}
Example #23
 /*
 Implements colorspace noise perturbation as described in:
 Krizhevsky et al., "ImageNet Classification with Deep Convolutional Neural Networks"
 Constructs a random coloring pixel that is uniformly added to every pixel of the image.
 pixelstd is filled with normally distributed values prior to calling this function.
 */
 void lighting(Mat& inout, float pixelstd[]) {
     // Skip transformations if given deterministic settings
     if (_params->_colorNoiseStd == 0.0) {
         return;
     }
     Mat alphas(3, 1, CV_32FC1, pixelstd);
     alphas = (CPCA * CSTD.mul(alphas));  // this is the random coloring pixel
     auto pixel = alphas.reshape(3, 1).at<Scalar_<float>>(0, 0);
     inout = (inout + pixel) / (1.0 + _params->_colorNoiseStd);
 }
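A hedged sketch of a caller (hypothetical, and assumed to live in the same class as lighting(); CPCA/CSTD are taken to be the 3x3 eigenvector matrix and 3x1 standard deviations of the training-set RGB covariance, per the paper): draw the three alphas from a zero-mean Gaussian before calling lighting().

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical caller: fill pixelstd with normally distributed draws, as the
// comment above requires, then apply the color noise.
void applyLighting(Mat& img, float colorNoiseStd, RNG& rng)
{
    float pixelstd[3];
    for (int i = 0; i < 3; i++)
        pixelstd[i] = (float)rng.gaussian(colorNoiseStd);
    lighting(img, pixelstd);
}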
Example #24
// Windowed normalized cross-correlation of L and R, mapped to CV_8U (128 = zero correlation).
void ncc(Mat L, Mat R, Mat &imd) {
	Mat Lb, Rb;
	boxFilter(L,  Lb, nccsize);
	boxFilter(R,  Rb, nccsize);
	Mat LL = L.mul(L);
	Mat RR = R.mul(R);
	Mat LR = L.mul(R);
	Mat LLb, RRb, LRb;
	boxFilter(LL,  LLb, nccsize);
	boxFilter(RR,  RRb, nccsize);
	boxFilter(LR,  LRb, nccsize);
	Mat LL2 = LLb - Lb.mul(Lb);
	Mat RR2 = RRb - Rb.mul(Rb);
	Mat LR2 = LRb - Lb.mul(Rb);
	Mat den = LL2.mul(RR2) + ncceps;
	sqrt(den, den);
	Mat ncc = LR2 / den;
	ncc.convertTo(imd, CV_8U, 128, 128);
}
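For reference, this computes (mean(LR) - mean(L)mean(R)) / sqrt(var(L) var(R)) over each box window. A hedged driver sketch (hypothetical; nccsize and ncceps are assumed to be a Size and a small epsilon defined elsewhere in the file):

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical caller: correlate two rectified grayscale views and visualize.
void nccDemo(const Mat& left8u, const Mat& right8u, Mat& vis8u)
{
    Mat L, R;
    left8u.convertTo(L, CV_32F, 1.0 / 255);
    right8u.convertTo(R, CV_32F, 1.0 / 255);
    ncc(L, R, vis8u);   // 128 = zero correlation, ~255 = +1, ~0 = -1
}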
Example #25
void WhitenessFilter(Mat& hsv_image, Mat& fin_img) {
    Mat result = 255 * Mat::ones(hsv_image.size(), CV_16UC1);
    Mat tmp;
    hsv_image.convertTo(tmp, CV_16UC3, 1.0);
    Mat channel[3];
    split(tmp, channel);
    result = result - channel[1];
    result = result.mul(channel[2]);
    result.convertTo(fin_img, CV_8UC1, 1.0/255);
}
Example #26
void lowPassFilter(Mat &F) 
{
    Mat H(F.rows, F.cols, CV_32FC2, Scalar(0, 0));

    swapFreq(F);

    // ideal low-pass: keep only frequencies inside a radius-200 disc around DC
    circle(H, Point(H.cols/2, H.rows/2), 200, Scalar::all(1), -1);
    F = F.mul(H);

    swapFreq(F);
}
Example #27
void MultiplyByComplements(Mat* images, Mat* complements, Mat* results) {
    for(size_t i = 0; i < KERNAL_COUNT; i++) {
        Mat image;
        Mat complement;
        Mat result = Mat::zeros(images[0].size(), CV_16UC1);
        images[i].convertTo(image, CV_16UC1, 1.0);
        complements[i].convertTo(complement, CV_16UC1, 1.0);
        result = image.mul(complement);
        result.convertTo(results[i], CV_8UC1, 1.0/255); // TODO figure this const out
    }
}
Example #28
Mat SegmentationUtility::obtainImageWithBoundary(
    const Mat& inputImage, const Mat& segImage) {
    Mat segmentedImage;
    normalize(segImage, segmentedImage, 0.0, 1.0, cv::NORM_MINMAX, CV_32FC3);
    Mat boundaryImage =
        SegmentationUtility::computeBoundary(segmentedImage);
    // cv::multiply writes to an output argument rather than returning a value
    multiply(boundaryImage, cv::Scalar(255, 255, 0), boundaryImage);

    Mat contourImage = inputImage;

    return contourImage + contourImage.mul(boundaryImage);
}
Example #29
vector<Mat> DifferencePhaseAmplitude(Mat &c_real, Mat &cRzX, Mat &cRzY, Mat &p_real, Mat &pRzX, Mat &pRzY )
{
    vector<Mat> v(3);

    {
        Mat qconjReal=c_real.mul(p_real) + cRzX.mul(pRzX) + cRzY.mul(pRzY);
        Mat qconjX= -c_real.mul(pRzX)+p_real.mul(cRzX);
        Mat qconjY= -c_real.mul(pRzY)+p_real.mul(cRzY);

        Mat num=qconjX.mul(qconjX)+qconjY.mul(qconjY);
        Mat qconjAmplitude;
        sqrt(qconjReal.mul(qconjReal)+num,qconjAmplitude);
        Mat ratio;
        divide(qconjReal, qconjAmplitude, ratio);
        Mat diffPhase = ArcCos(ratio);
        Mat cosAngle;
        Mat sinAngle;
        divide(qconjX,num,cosAngle);
        divide(qconjY,num,sinAngle);
        Mat diffPhaseCos=diffPhase.mul(cosAngle);
        Mat diffPhaseSin=diffPhase.mul(sinAngle);
       
        Mat amplitude;
        sqrt(qconjAmplitude,amplitude);
        v[0]=diffPhaseCos;
        v[1]=diffPhaseSin;
        v[2]=amplitude;
        DisplayImage(c_real, "phasecos");
        DisplayImage(cRzX, "phaseSin");
        DisplayImage(cRzY, "p");
    }
    return v;
}
Example #30
void projectImage(
	double* arrar,
	vector<Point3d> vec,
	Mat imageVector,
	const char* filename,
	int n)
{
	Mat projectMat = Mat(3,3, CV_64F,arrar);          // build a projection matrix that maps 3D points onto the 2D plane

	Mat PointMat = Mat(vec).reshape(1).t();                 // multiply the projection matrix by the 3D point matrix
	Mat result;
	result = projectMat * PointMat;

	Mat G = result.row(0).clone();                          // the 2D point grid on the image
	Mat H = result.row(1).clone();
	Mat J = result.row(2).clone();

	vconcat(G.mul(1/J),H.mul(1/J),imageVector);
	imageVector = imageVector.t();

	Mat delimav;                                            // drop out-of-range rows to keep only the valid projected points
	int t = n+1;
	for(int i=0;i<t;i++)
		if(imageVector.at<double>(i,0) < 0 || imageVector.at<double>(i,1) < 0
			||imageVector.at<double>(i,0) >1 || imageVector.at<double>(i,1) >1)
		{
			Mat front = imageVector(Range(0,i),Range::all());
			Mat back = imageVector(Range(i+1,t),Range::all());
			vconcat(front,back,delimav);

			imageVector.release();
			imageVector = delimav.clone();
			//--------------------//
			i--;
			t--;
		}                                                     // imageVector now holds the projected coordinates
	printFile(imageVector, filename);
}