Example #1
File: blenders.cpp Project: 93sam/opencv
void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
    if(tegra::createLaplacePyr(img, num_levels, pyr))
        return;
#endif

    pyr.resize(num_levels + 1);

    if(img.depth() == CV_8U)
    {
        if(num_levels == 0)
        {
            img.convertTo(pyr[0], CV_16S);
            return;
        }

        Mat downNext;
        Mat current = img;
        pyrDown(img, downNext);

        for(int i = 1; i < num_levels; ++i)
        {
            Mat lvl_up;
            Mat lvl_down;

            pyrDown(downNext, lvl_down);
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);

            current = downNext;
            downNext = lvl_down;
        }

        {
            Mat lvl_up;
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);

            downNext.convertTo(pyr[num_levels], CV_16S);
        }
    }
    else
    {
        pyr[0] = img;
        for (int i = 0; i < num_levels; ++i)
            pyrDown(pyr[i], pyr[i + 1]);
        Mat tmp;
        for (int i = 0; i < num_levels; ++i)
        {
            pyrUp(pyr[i + 1], tmp, pyr[i].size());
            subtract(pyr[i], tmp, pyr[i]);
        }
    }
}
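The pyramid levels are written as CV_16S because the detail coefficients can be negative. For inspecting a level, a small hedged helper (the name showLevel and the 128 offset are illustrative, not part of OpenCV) can shift the values into a displayable range:

// Hypothetical inspection helper: shift signed Laplacian coefficients into [0, 255] for display.
cv::Mat showLevel(const cv::Mat &lvl)          // lvl: one CV_16S level from the pyramid above
{
    cv::Mat vis;
    lvl.convertTo(vis, CV_8U, 1.0, 128.0);     // bias by 128 so zero detail maps to mid-gray
    return vis;
}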
Example #2
Mat_<Vec3f> LaplacianBlending::reconstructImgFromLapPyramid() {
    Mat currentImg = resultSmallestLevel;
    for (int l=levels-1; l>=0; l--) {
        Mat up;

        pyrUp(currentImg, up, resultLapPyr[l].size());
        currentImg = up + resultLapPyr[l];
    }
    return currentImg;
}
Example #3
void bilateral(Mat& frame) {
	Mat tmp;
	for (int i = 0; i < 2; ++i) {
		pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));
	}
	frame.copyTo(tmp);	// bilateralFilter cannot run in place, so filter a copy back into frame
	bilateralFilter(tmp, frame, 3, 3, 3);
	for (int i = 0; i < 2; ++i) {
		pyrUp(frame, frame, Size(frame.cols * 2, frame.rows * 2));
	}
}
Example #4
void ImagePyramid::on_pyrUpButton_clicked()
{
    if (pyrUpCount < 3){
        pyrUp(image, image , Size(image.cols * 2, image.rows * 2));
        pyrUpCount++;
        ui->image->setPixmap(ImageHandler::getQPixmap(image));
    } else {
        QMessageBox::warning(this, "Out of memory warning", "Can't perform this operation due to insufficient memory");
    }
}
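Each pyrUp call doubles both dimensions, so three calls grow the pixel count 64-fold, which is why the handler caps pyrUpCount. A hedged pre-check one could run before upsampling (canPyrUp and budgetBytes are illustrative, application-defined names):

// Hypothetical guard: estimate the size of the next pyrUp result before allocating it.
bool canPyrUp(const cv::Mat &image, size_t budgetBytes)
{
    // pyrUp doubles each dimension, so the output needs about 4x the input's memory.
    size_t nextBytes = image.total() * image.elemSize() * 4;
    return nextBytes <= budgetBytes;
}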
Example #5
void buildImgFromLaplacePyr(const vector<cv::Mat> &pyr, const int levels, cv::Mat &dst)
{
	cv::Mat currentLevel = pyr[levels];

    for (int level = levels-1; level >= 0; --level) {
		cv::Mat up;
        pyrUp(currentLevel, up, pyr[level].size());
        currentLevel = up+pyr[level];
    }
    dst = currentLevel.clone();
}
Example #6
File: blenders.cpp Project: 93sam/opencv
void restoreImageFromLaplacePyr(std::vector<Mat> &pyr)
{
    if (pyr.empty())
        return;
    Mat tmp;
    for (size_t i = pyr.size() - 1; i > 0; --i)
    {
        pyrUp(pyr[i], tmp, pyr[i - 1].size());
        add(tmp, pyr[i - 1], pyr[i - 1]);
    }
}
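A hedged usage sketch pairing the two blenders.cpp helpers above (assuming both declarations are visible): build the pyramid with createLaplacePyr, collapse it in place with restoreImageFromLaplacePyr, and read the result out of pyr[0].

// Hypothetical round trip with the two helpers shown in Example #1 and Example #6.
cv::Mat roundTrip(const cv::Mat &img8u, int num_levels)
{
    std::vector<cv::Mat> pyr;
    createLaplacePyr(img8u, num_levels, pyr);   // fills pyr with CV_16S levels
    restoreImageFromLaplacePyr(pyr);            // collapses the pyramid in place
    cv::Mat out;
    pyr[0].convertTo(out, CV_8U);               // pyr[0] now holds the reconstruction
    return out;
}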
Example #7
void bilateralSatured(Mat& frame) {
	Mat tmp;
	for (int i = 0; i < 2; ++i) {
		pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));
	}
	saturar(frame, tmp, 55);
	bilateralFilter(tmp, frame, 3, 3, 3);
	for (int i = 0; i < 2; ++i) {
		pyrUp(frame, frame, Size(frame.cols * 2, frame.rows * 2));
	}
}
Example #8
Mat_<Vec3f> LaplacianBlending::reconstructImgFromLapPyramid() {
	// Upsample and accumulate each level of resultLapPyr (the pyramid assembled from the
	// left/right Laplacian images) from coarse to fine; the sum is the blended result
	Mat currentImg = resultHighestLevel;  
	for (int l=levels-1; l>=0; l--) {  
		Mat up;   
		pyrUp(currentImg, up, resultLapPyr[l].size());  
		currentImg = up + resultLapPyr[l];  
	}  
	return currentImg;  
}  
Example #9
Mat LapalicanClass::reconstructImg()
{
	targetImg = bgGauPyramid[levels - 1];
	pyrDown(targetImg, targetImg);
	Mat up;
	for (int l = levels - 1; l >= 0; l--)
	{
		pyrUp(targetImg, up);
		targetImg = up + bgLapPyr[l];
	}
	return targetImg;
}
Example #10
/*----------------------------
 * Purpose  : image denoising
 *----------------------------
 * Function : PointCloudAnalyzer::imageDenoising
 * Access   : private
 * Returns  : void
 *
 * Param : img	[in]	image to process, modified in place
 * Param : iters	[in]	number of morphological iterations
 */
void PointCloudAnalyzer::imageDenoising( cv::Mat& img, int iters )
{
	cv::Mat pyr = cv::Mat(img.rows/2, img.cols/2, img.type());	// Mat takes (rows, cols); pyrDown reallocates this anyway

	IplImage iplImg = img;
	cvSmooth(&iplImg, &iplImg, CV_GAUSSIAN, 3, 3);	// Gaussian smoothing (legacy C API)

	pyrDown(img, pyr);	// down- then upsample the smoothed image to suppress fine noise
	pyrUp(pyr, img);

	erode(img, img, cv::Mat(), cv::Point(-1,-1), iters);	// erosion with the default 3x3 kernel
	dilate(img, img, cv::Mat(), cv::Point(-1,-1), iters);	// dilation
}
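The routine above mixes the legacy C API (IplImage, cvSmooth) with C++ calls. A hedged, C++-only sketch of the same steps, where the 3x3 blur and kernel sizes mirror the values used above and the function name is illustrative:

// C++-only equivalent of the denoising above: Gaussian blur, one pyrDown/pyrUp
// pass to suppress fine detail, then erosion and dilation.
void imageDenoisingCpp(cv::Mat &img, int iters)
{
    cv::GaussianBlur(img, img, cv::Size(3, 3), 0);

    cv::Mat half;
    cv::pyrDown(img, half);                 // halve the resolution ...
    cv::pyrUp(half, img, img.size());       // ... and bring it back to the original size

    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::erode(img, img, kernel, cv::Point(-1, -1), iters);
    cv::dilate(img, img, kernel, cv::Point(-1, -1), iters);
}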
Example #11
void LapalicanClass::reconstructImg()
{
	targetImg = bgGauPyramid[levels - 1];
	pyrDown(targetImg, targetImg);
	Mat up;
	for (int l = levels - 1; l >= 0; l--)
	{
		pyrUp(targetImg, up);
		targetImg = up + bgLapPyr[l];	
	}
	imshow("ta", targetImg);
	waitKey(0);
}
Example #12
void MultiBandBlender::buildLaplacianPyramid(vector<Mat>& gaussianPymid,vector<Mat>& laplacianPymid)
{
	int len=gaussianPymid.size();
	if(len<=0) return;

	Mat upsampleMat;
	for(int i=1;i<len;++i)
	{
		pyrUp(gaussianPymid[i],upsampleMat,gaussianPymid[i-1].size());
		laplacianPymid.push_back(gaussianPymid[i-1]-upsampleMat);
	}
	laplacianPymid.push_back(gaussianPymid[len-1]);
}
Example #13
void LaplacianBlending::buildLaplacianPyramid(const Mat& img, vector<Mat_<Vec3f> >& lapPyr, Mat& smallestLevel) {
    lapPyr.clear();
    Mat currentImg = img;
    for (int l=0; l<levels; l++) {
        Mat down,up;
        pyrDown(currentImg, down);
        pyrUp(down, up, currentImg.size());
        Mat lap = currentImg - up;
        lapPyr.push_back(lap);
        currentImg = down;
    }
    currentImg.copyTo(smallestLevel);
}
Example #14
////////////////////////
/// Upsampling /////////
////////////////////////
void buildImgFromGaussPyr(const cv::Mat &pyr, const int levels, cv::Mat &dst, cv::Size size)
{
	cv::Mat currentLevel = pyr.clone();

    for (int level = 0; level < levels; ++level) {
		cv::Mat up;
        pyrUp(currentLevel, up);
        currentLevel = up;
    }
    // Resize the image to compensate for rounding errors
    resize(currentLevel,currentLevel,size);
    currentLevel.copyTo(dst);
}
Example #15
void LapalicanClass::buildLaplacianPyramid(const Mat& img, vector<Mat_<Vec3b> >& lapPyr)
{
	lapPyr.clear();
	Mat currentImg = img;
	for (int l = 0; l<levels; l++) {
		Mat down, up;
		pyrDown(currentImg, down);
		pyrUp(down, up, currentImg.size());
		Mat lap = currentImg - up;

		lapPyr.push_back(lap);
		currentImg = down;
		
	}
}
Example #16
void buildLaplacePyrFromImg(const cv::Mat &img, const int levels, vector<cv::Mat> &pyr)
{
    pyr.clear();
	cv::Mat currentLevel = img;

    for (int level = 0; level < levels; ++level) {
		cv::Mat down, up;
        pyrDown(currentLevel, down);
        pyrUp(down, up, currentLevel.size());
		cv::Mat laplace = currentLevel - up;
        pyr.push_back(laplace);
        currentLevel = down;
    }
    pyr.push_back(currentLevel);
}
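A hedged sketch pairing this builder with buildImgFromLaplacePyr from Example #5 (assuming both are declared together). Note that for CV_8U input the subtraction above saturates negative residuals, so an exact round trip needs a floating-point image:

// Hypothetical round trip: decompose into a Laplacian pyramid, then rebuild the image.
cv::Mat pyramidRoundTrip(const cv::Mat &img32f, int levels)     // img32f: CV_32F to avoid saturation
{
    std::vector<cv::Mat> pyr;
    buildLaplacePyrFromImg(img32f, levels, pyr);    // levels residuals + 1 coarse image (Example #16)
    cv::Mat restored;
    buildImgFromLaplacePyr(pyr, levels, restored);  // collapse back to full resolution (Example #5)
    return restored;                                // matches img32f up to floating-point rounding
}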
Example #17
Mat MultiBandBlender::reconstruct(vector<Mat>& laplacianPymid1,vector<Mat>& laplacianPymid2,vector<Mat>& gaussianPymidMask)
{
	Mat destMat,tempMat;
	for(int level= laplacianPymid1.size()-1;level>=0;--level)
	{
		int rows =gaussianPymidMask[level].rows,cols = gaussianPymidMask[level].cols;
		if(destMat.empty())
		{
			destMat = Mat::zeros(rows,cols,CV_32FC3);
		}
		else 
		{
			tempMat = destMat;
			pyrUp(tempMat,destMat,Size(cols,rows));
		}
		destMat+=(laplacianPymid1[level].mul(gaussianPymidMask[level])+laplacianPymid2[level].mul(Scalar(1.0,1.0,1.0) - gaussianPymidMask[level]));
	}
	return destMat;
}
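Examples #12 and #17 are two halves of one blend. A hedged outline of how they fit together, using cv::buildPyramid for the Gaussian pyramids; img1, img2 and the soft mask are assumed to be CV_32FC3 with mask values in [0, 1], and multiBandBlend is an illustrative name:

// Outline of a full two-image multiband blend in the spirit of Examples #12 and #17.
cv::Mat multiBandBlend(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask, int levels)
{
    std::vector<cv::Mat> g1, g2, gm, l1, l2;
    cv::buildPyramid(img1, g1, levels);              // Gaussian pyramids (levels + 1 images each)
    cv::buildPyramid(img2, g2, levels);
    cv::buildPyramid(mask, gm, levels);

    // Laplacian pyramids as in Example #12: per-level residuals, coarsest Gaussian level last.
    for (int i = 1; i <= levels; ++i) {
        cv::Mat up;
        cv::pyrUp(g1[i], up, g1[i - 1].size());
        l1.push_back(g1[i - 1] - up);
        cv::pyrUp(g2[i], up, g2[i - 1].size());
        l2.push_back(g2[i - 1] - up);
    }
    l1.push_back(g1[levels]);
    l2.push_back(g2[levels]);

    // Blend each level with the mask pyramid and collapse, as in Example #17.
    cv::Mat dst;
    for (int lvl = levels; lvl >= 0; --lvl) {
        cv::Mat blended = l1[lvl].mul(gm[lvl]) + l2[lvl].mul(cv::Scalar(1, 1, 1) - gm[lvl]);
        if (dst.empty()) {
            dst = blended;
        } else {
            cv::Mat up;
            cv::pyrUp(dst, up, gm[lvl].size());
            dst = up + blended;
        }
    }
    return dst;
}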
Example #18
PERF_TEST_P(Size_MatType, pyrUp, testing::Combine(
                testing::Values(sz720p, szVGA, szQVGA, szODD),
                testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16SC1, CV_16SC3, CV_16SC4, CV_32FC1, CV_32FC3, CV_32FC4)
                )
            )
{
    Size sz = get<0>(GetParam());
    int matType = get<1>(GetParam());
    const double eps = CV_MAT_DEPTH(matType) <= CV_32S ? 1 : 1e-5;
    perf::ERROR_TYPE error_type = CV_MAT_DEPTH(matType) <= CV_32S ? ERROR_ABSOLUTE : ERROR_RELATIVE;

    Mat src(sz, matType);
    Mat dst(sz.height*2, sz.width*2, matType);

    declare.in(src, WARMUP_RNG).out(dst);

    TEST_CYCLE() pyrUp(src, dst);

    SANITY_CHECK(dst, eps, error_type);
}
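As the preallocated dst in this test suggests, pyrUp without an explicit dstsize produces an output of exactly twice the input width and height. A tiny hedged check:

// Sanity sketch: the default pyrUp output size is Size(src.cols * 2, src.rows * 2).
void pyrUpSizeCheck()
{
    cv::Mat src(480, 640, CV_8UC3), dst;
    cv::pyrUp(src, dst);                                           // no dstsize given
    CV_Assert(dst.size() == cv::Size(src.cols * 2, src.rows * 2));
}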
Example #19
/*----------------------------
 * Purpose  : image denoising
 *----------------------------
 * Function : PointCloudAnalyzer::imageDenoising
 * Access   : private
 * Returns  : void
 *
 * Param : img	[in]	image to process, modified in place
 * Param : iters	[in]	number of morphological iterations
 */
void PointCloudAnalyzer::imageDenoising( cv::Mat& img, int iters )
{
	cv::Mat pyr = cv::Mat(img.rows/2, img.cols/2, img.type());	// Mat takes (rows, cols); pyrDown reallocates this anyway

	IplImage iplImg = img;
	cvSmooth(&iplImg, &iplImg, CV_GAUSSIAN, 3, 3);	// CV_GAUSSIAN smoothing (legacy C API) - convolves the image with a param1 x param2 Gaussian kernel
	GaussianBlur( img, img, Size(3,3), 0, 0, BORDER_DEFAULT );
	pyrDown(img, pyr);	// down- then upsample the smoothed image to suppress fine noise
	pyrUp(pyr, img);

	erode(img, img, cv::Mat(), cv::Point(-1,-1), iters);	// erosion (opening-style step, removes small bright regions)
	dilate(img, img, cv::Mat(), cv::Point(-1,-1), iters);	// dilation
/*	int dilation_type;
	int dilation_size=20;
    dilation_type = MORPH_RECT; 
    Mat element = getStructuringElement( dilation_type,
                                       Size( 2*dilation_size + 1, 2*dilation_size+1 ),
                                       Point( dilation_size, dilation_size ) );
	erode(img, img,element);
	dilate(img, img,element);	// dilation*/
	
}
Example #20
void MOPSFeatures::getImagePyrimid(Mat& image, vector<ImgPyr>& imgPyr)
{
    Mat im = image;
    imgPyr.push_back(ImgPyr(image,0));

    float adj = 2;
    for (int i = 1; i <= nUpLevels_; i++) {
	Mat dst;
	pyrUp(image,dst);
	imgPyr.push_back(ImgPyr(dst,(float)1/adj));
	image = dst;
	adj *= 2;
    }

    image = im;
    adj = 2;
    for (int i = 1; i <= nDnLevels_; i++) {
	Mat dst;
	pyrDown(image,dst);
	imgPyr.push_back(ImgPyr(dst,adj));
	image = dst;
	adj *= 2;
    }
}
Example #21
int main(int argc, char** argv) {
	cv::VideoCapture stream(0); // open video stream from any video source
	int count = 0;   
int frame_width = stream.get(CV_CAP_PROP_FRAME_WIDTH);
int frame_height = stream.get(CV_CAP_PROP_FRAME_HEIGHT);

VideoWriter outputVideo("salient_video.avi", CV_FOURCC('M', 'J', 'P', 'G'), 20 , Size(frame_width, frame_height), true); 
while(stream.isOpened())
{
  cv::Mat inputImage;
   if ( ! stream.read(inputImage) ) // try to read a frame
            break;
   
   Mat finalImage;
	Mat * ptr = NULL;
	pthread_t intensityThread, colorThread;
   long totaltime = timestamp();
	
	//long intTime = timestamp();
	IntensityImg = Get_Intensity_Image(inputImage);
	pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptr);
	//pthread_join(intensityThread, NULL);
	//long intFinal = timestamp() - intTime;
	//cout << "Intensity Map Time: " << intFinal << "\n";
	
	ptr = &inputImage;
	//long colTime = timestamp();
	pthread_create(&colorThread, NULL, color_processing, (void *) ptr);
	//pthread_join(colorThread, NULL);
	//long colFinal = timestamp() - colTime;
	//cout << "Color Map Time: " << colFinal << "\n";
	
	//long orTime = timestamp();
	Mat AggOr = getGaborImage();
	normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
	//long orFinal = timestamp() - orTime;
	//cout << "Orientation Map Time: " << orFinal << "\n";

        pthread_join(intensityThread, NULL);
        pthread_join(colorThread, NULL);

	
	finalImage = (AggInt + AggColor + AggOr) / 3;
	normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

	for (int bCtr = 0; bCtr < 4; bCtr++) {
		pyrUp(finalImage, finalImage);
	}
	
	long finaltime = timestamp() - totaltime;
	cout << "Total Time: " << finaltime << "\n";
	Mat contImg;
	inRange(finalImage, 130, 255, contImg);
	vector < vector<Point> > contours;
	vector < Vec4i > hierarchy;

	findContours(contImg, contours, hierarchy, CV_RETR_CCOMP,
			CV_CHAIN_APPROX_SIMPLE);
	for (int i = 0; i >= 0 && !hierarchy.empty(); i = hierarchy[i][0]) {	// guard against frames with no contours
		Scalar color(rand() & 255, rand() & 255, rand() & 255);
		drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
	}


   
   outputVideo.write(inputImage);
  
}
	return 0;
}
Example #22
void SquareOcl::find_squares_cpu( const Mat& image, vector<vector<Point> >& squares )
{
    squares.clear();

    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for( int l = 0; l < SQUARE_OCL_THRESH_LEVEL_H; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, SQUARE_OCL_EDGE_THRESH_H, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l+1)*255/SQUARE_OCL_THRESH_LEVEL_H;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;

            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( approx.size() == 4 &&
                        fabs(contourArea(Mat(approx))) > 1000 &&
                        isContourConvex(Mat(approx)) )
                {
                    double maxCosine = 0;

                    for( int j = 2; j < 5; j++ )
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quadrangle
                    // vertices to resultant sequence
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}
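The helper angle() used above is not defined in this snippet. The routine closely follows OpenCV's squares.cpp sample, where angle() returns the cosine of the angle between the edges pt0->pt1 and pt0->pt2; a matching definition would look like this:

// Cosine of the angle between vectors (pt1 - pt0) and (pt2 - pt0), as in OpenCV's squares.cpp sample.
static double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1 * dx2 + dy1 * dy2) / std::sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}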
Example #23
    void initKernel(Mat& kernel, const Mat& blurredGray, const int width, const Mat& mask,
                    const int pyrLevel, const int iterations, float thresholdR, float thresholdS) {
        
        assert(blurredGray.type() == CV_8U && "gray value image needed");
        assert(mask.type() == CV_8U && "mask should be binary image");
        
        // #ifndef NDEBUG
        //     imshow("blurred", blurredGray);
        // #endif
        
        // save min and maximum value of the original image to be able to restore
        // the latent image with the correct brightness
        double grayMin; double grayMax;
        minMaxLoc(blurredGray, &grayMin, &grayMax);

        // build an image pyramid with gray value images
        vector<Mat> pyramid, masks;
        pyramid.push_back(blurredGray);
        masks.push_back(mask);

        for (int i = 0; i < (pyrLevel - 1); i++) {
            Mat downImage, downMask;
            pyrDown(pyramid[i], downImage, Size(pyramid[i].cols/2, pyramid[i].rows/2));
            pyrDown(masks[i], downMask, Size(masks[i].cols/2, masks[i].rows/2));

            pyramid.push_back(downImage);
            masks.push_back(downMask);
        }

        // init kernel but in the iterations the tmp-kernel is used
        kernel = Mat::zeros(width, width, CV_32F);
        Mat tmpKernel;

        // go through image pyramid from small to large
        for (int l = pyramid.size() - 1; l >= 0; l--) {
            #ifdef IMWRITE
                imshow("pyr Image", pyramid[l]);
                double min; double max;
                minMaxLoc(pyramid[l], &min, &max);
                cout << "pyr: " << min << " " << max << endl;
                waitKey();
            #endif

            // compute image gradient for x and y direction
            // 
            // gaussian blur (in-place operation is supported)
            GaussianBlur(pyramid[l], pyramid[l], Size(3,3), 0, 0, BORDER_DEFAULT);

            // parameter for sobel filtering to obtain gradients
            array<Mat,2> gradients, tmpGradients;
            const int delta = 0;
            const int ddepth = CV_32F;
            const int ksize = 3;
            const int scale = 1;

            // gradient x and y
            Sobel(pyramid[l], tmpGradients[0], ddepth, 1, 0, ksize, scale, delta, BORDER_DEFAULT);
            Sobel(pyramid[l], tmpGradients[1], ddepth, 0, 1, ksize, scale, delta, BORDER_DEFAULT);

            // cut off gradients outside the mask
            tmpGradients[0].copyTo(gradients[0], masks[l]);
            tmpGradients[1].copyTo(gradients[1], masks[l]);

            // normalize gradients into range [-1,1]
            normalizeOne(gradients);

            // #ifdef IMWRITE
            //     showGradients("x gradient", gradients[0]);
            //     showGradients("y gradient", gradients[1]);
            // #endif
            

            // compute gradient confidence for all pixels
            Mat gradientConfidence;
            computeGradientConfidence(gradientConfidence, gradients, width, masks[l]);

            // #ifdef IMWRITE
            //     showFloat("confidence", gradientConfidence);
            // #endif


            // each iterations works on an updated image
            Mat currentImage;
            pyramid[l].copyTo(currentImage);

            // assert(iterations == 1 && "Implement multiple iterations");

            for (int i = 0; i < iterations; i++) {
                #ifdef IMWRITE
                    imshow("current Image", currentImage);
                    minMaxLoc(currentImage, &min, &max);
                    cout << "current: " << min << " " << max << endl;
                    waitKey();
                #endif

                // select edges for kernel estimation (normalized gradients [-1,1])
                array<Mat,2> selectedEdges;
                selectEdges(currentImage, gradientConfidence, thresholdR, thresholdS, selectedEdges);

                #ifdef IMWRITE
                    showGradients("x gradient selection", selectedEdges[0]);
                    showGradients("y gradient selection", selectedEdges[1]);
                    minMaxLoc(selectedEdges[0], &min, &max);
                    cout << "x gradients: " << min << " " << max << endl;
                    waitKey();
                #endif


                // estimate kernel with gaussian prior
                fastKernelEstimation(selectedEdges, gradients, kernel, 0.0);

                #ifdef IMWRITE
                    showFloat("tmp-kernel", kernel, true);
                    minMaxLoc(kernel, &min, &max);
                    cout << "kernel: " << min << " " << max << " sum: " << sum(kernel)[0] << endl;
                    waitKey();
                #endif


                // coarse image estimation with a spatial prior
                Mat latentImage;
                // FIXME: it looks like there are some edges of the gradients in the latent image.
                //        with more iterations it becomes worse
                // coarseImageEstimation(pyramid[l], kernel, selectedEdges, latentImage);
                
                // use another spatial deconvolution method for now
                deconvolveIRLS(pyramid[l], latentImage, kernel);

                #ifdef IMWRITE
                    string name = "two-phase-latent-" + to_string(i);
                    imshow(name, latentImage);
                    waitKey();

                    string filename = name + ".png";
                    imwrite(filename, latentImage);
                #endif


                // set current image to coarse latent image
                latentImage.copyTo(currentImage);

                // decrease thresholds τ_r and τ_s to include more and more edges
                thresholdR = thresholdR / 1.1;
                thresholdS = thresholdS / 1.1;
            }

            // set next pyramid image to the upscaled latent image
            if (l > 0) {
                Mat upImage;
                pyrUp(currentImage, upImage, Size(pyramid[l - 1].cols, pyramid[l - 1].rows));
                pyramid[l - 1] = upImage;
            }
        }

        // #ifdef IMWRITE
        //     imshow("kernel", kernel);
        //     waitKey();
        // #endif
    }
Example #24
    void process(InputArrayOfArrays src, OutputArray dst)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        checkImageDimensions(images);

        int channels = images[0].channels();
        CV_Assert(channels == 1 || channels == 3);
        Size size = images[0].size();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        std::vector<Mat> weights(images.size());
        Mat weight_sum = Mat::zeros(size, CV_32F);

        for(size_t i = 0; i < images.size(); i++) {
            Mat img, gray, contrast, saturation, wellexp;
            std::vector<Mat> splitted(channels);

            images[i].convertTo(img, CV_32F, 1.0f/255.0f);
            if(channels == 3) {
                cvtColor(img, gray, COLOR_RGB2GRAY);
            } else {
                img.copyTo(gray);
            }
            split(img, splitted);

            Laplacian(gray, contrast, CV_32F);
            contrast = abs(contrast);

            Mat mean = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                mean += splitted[c];
            }
            mean /= channels;

            saturation = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels;  c++) {
                Mat deviation = splitted[c] - mean;
                pow(deviation, 2.0f, deviation);
                saturation += deviation;
            }
            sqrt(saturation, saturation);

            wellexp = Mat::ones(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                Mat expo = splitted[c] - 0.5f;
                pow(expo, 2.0f, expo);
                expo = -expo / 0.08f;
                exp(expo, expo);
                wellexp = wellexp.mul(expo);
            }

            pow(contrast, wcon, contrast);
            pow(saturation, wsat, saturation);
            pow(wellexp, wexp, wellexp);

            weights[i] = contrast;
            if(channels == 3) {
                weights[i] = weights[i].mul(saturation);
            }
            weights[i] = weights[i].mul(wellexp) + 1e-12f;
            weight_sum += weights[i];
        }
        int maxlevel = static_cast<int>(logf(static_cast<float>(min(size.width, size.height))) / logf(2.0f));
        std::vector<Mat> res_pyr(maxlevel + 1);

        for(size_t i = 0; i < images.size(); i++) {
            weights[i] /= weight_sum;
            Mat img;
            images[i].convertTo(img, CV_32F, 1.0f/255.0f);

            std::vector<Mat> img_pyr, weight_pyr;
            buildPyramid(img, img_pyr, maxlevel);
            buildPyramid(weights[i], weight_pyr, maxlevel);

            for(int lvl = 0; lvl < maxlevel; lvl++) {
                Mat up;
                pyrUp(img_pyr[lvl + 1], up, img_pyr[lvl].size());
                img_pyr[lvl] -= up;
            }
            for(int lvl = 0; lvl <= maxlevel; lvl++) {
                std::vector<Mat> splitted(channels);
                split(img_pyr[lvl], splitted);
                for(int c = 0; c < channels; c++) {
                    splitted[c] = splitted[c].mul(weight_pyr[lvl]);
                }
                merge(splitted, img_pyr[lvl]);
                if(res_pyr[lvl].empty()) {
                    res_pyr[lvl] = img_pyr[lvl];
                } else {
                    res_pyr[lvl] += img_pyr[lvl];
                }
            }
        }
        for(int lvl = maxlevel; lvl > 0; lvl--) {
            Mat up;
            pyrUp(res_pyr[lvl], up, res_pyr[lvl - 1].size());
            res_pyr[lvl - 1] += up;
        }
        dst.create(size, CV_32FCC);
        res_pyr[0].copyTo(dst.getMat());
    }
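This process() implementation matches the Mertens exposure-fusion merge that OpenCV exposes as cv::MergeMertens in the photo module (contrast, saturation and well-exposedness weights, then a per-level Laplacian-pyramid blend). A hedged usage sketch of that public API, with an illustrative wrapper name:

// Usage sketch of the corresponding public API (cv::createMergeMertens / cv::MergeMertens).
cv::Mat fuseExposures(const std::vector<cv::Mat> &exposures)   // CV_8UC3 shots of the same scene
{
    cv::Mat fusion;                                            // CV_32FC3 result, roughly in [0, 1]
    cv::Ptr<cv::MergeMertens> mergeMertens = cv::createMergeMertens();
    mergeMertens->process(exposures, fusion);
    return fusion;
}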
Example #25
void pkmSIFTFlow::computeFlow(Mat image1, Mat image2, int nchannels)
{
#ifdef _DEBUG
    assert(image1.cols == image2.cols &&
           image1.rows == image2.rows);
#endif
    
    width = image1.cols;
    height = image1.rows;
    
    sift1_level1 = image1;
    sift2_level1 = image2;
    
    pyrDown(sift1_level1, sift1_level2, cv::Size(width/2, height/2));
    pyrDown(sift2_level1, sift2_level2, cv::Size(width/2, height/2));
    
    pyrDown(sift1_level2, sift1_level3, cv::Size(width/4, height/4));
    pyrDown(sift2_level2, sift2_level3, cv::Size(width/4, height/4));
    
    pyrDown(sift1_level3, sift1_level4, cv::Size(width/8, height/8));
    pyrDown(sift2_level3, sift2_level4, cv::Size(width/8, height/8));
    
    if (!sift1_level3.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    if (!sift2_level3.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    if (!sift1_level2.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    if (!sift2_level2.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    if (!sift1_level1.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    if (!sift2_level1.isContinuous()) {
        printf("[ERROR] Matrix not continuous!!!\n");
    }
    bpflow.LoadImages(width, height, nchannels, sift1_level4.ptr<unsigned char>(0), sift2_level4.ptr<unsigned char>(0));
    bpflow.setPara(alpha, d);
    
    // first assume homogeneous setup
    bpflow.setHomogeneousMRF(wsize);    
    
    // level 4
    vx_level4 = Mat::zeros(height/8, width/8, CV_32FC1);
    vy_level4 = Mat::zeros(height/8, width/8, CV_32FC1);
    bpflow.LoadOffset(vx_level4.ptr(0), vy_level4.ptr(0));
    bpflow.LoadWinSize(winSizeX_level4.ptr(0), winSizeY_level4.ptr(0));
    
    bpflow.ComputeDataTerm();
    bpflow.ComputeRangeTerm(gamma);
    bpflow.MessagePassing(nIterations, 2, pEnergyList);
    bpflow.ComputeVelocity();
    {
        Mat flow(height/8, width/8, CV_32FC2, bpflow.flow().pData);
        vector<Mat> flows;
        split(flow, flows);
        
        vx_level4 = flows[0];
        vy_level4 = flows[1];
    }
    
    pyrUp(vx_level4, vx_level3, cv::Size(width/4, height/4));
    pyrUp(vy_level4, vy_level3, cv::Size(width/4, height/4));
    
    // level 3
    bpflow.LoadOffset(vx_level3.ptr(0), vy_level3.ptr(0));
    bpflow.LoadWinSize(winSizeX_level3.ptr(0), winSizeY_level3.ptr(0));
    
    bpflow.ComputeDataTerm();
    bpflow.ComputeRangeTerm(gamma);
    bpflow.MessagePassing(nIterations, 2, pEnergyList);
    bpflow.ComputeVelocity();
    {
        Mat flow(height/4, width/4, CV_32FC2, bpflow.flow().pData);
        vector<Mat> flows;
        split(flow, flows);
        
        vx_level3 = flows[0];
        vy_level3 = flows[1];
    }
    
    pyrUp(vx_level3, vx_level2, cv::Size(width/2, height/2));
    pyrUp(vy_level3, vy_level2, cv::Size(width/2, height/2));
    
    // level 2
    bpflow.LoadOffset(vx_level2.ptr(0), vy_level2.ptr(0));
    bpflow.LoadWinSize(winSizeX_level2.ptr(0), winSizeY_level2.ptr(0));
    
    bpflow.ComputeDataTerm();
    bpflow.ComputeRangeTerm(gamma);
    bpflow.MessagePassing(nIterations, 2, pEnergyList);
    bpflow.ComputeVelocity();
    {
        Mat flow(height/2, width/2, CV_32FC2, bpflow.flow().pData);
        vector<Mat> flows;
        split(flow, flows);
        
        vx_level2 = flows[0];
        vy_level2 = flows[1];
    }
    
    pyrUp(vx_level2, vx_level1, cv::Size(width, height));
    pyrUp(vy_level2, vy_level1, cv::Size(width, height));
    
    // level 1
    bpflow.LoadOffset(vx_level1.ptr(0), vy_level1.ptr(0));
    bpflow.LoadWinSize(winSizeX_level1.ptr(0), winSizeY_level1.ptr(0));
    
    bpflow.ComputeDataTerm();
    bpflow.ComputeRangeTerm(gamma);
    bpflow.MessagePassing(nIterations, 2, pEnergyList);
    bpflow.ComputeVelocity();
    {
        Mat flow(height, width, CV_32FC2, bpflow.flow().pData);
        vector<Mat> flows;
        split(flow, flows);
        
        vx_level1 = flows[0];
        vy_level1 = flows[1];
    }
    
}
Example #26
int main(int argc, char** argv) {
	if (argc != 2) {
		cout << "No image" << endl;
		return -1;
	}
	cout << "Loading Image: ";
	cout << argv[1] << endl;
	Mat inputImage = imread(argv[1], CV_LOAD_IMAGE_COLOR);

	if (!inputImage.data) {
		cout << "Invalid Image" << endl;
		return -1;
	}
	Mat finalImage;
	Mat * ptr = NULL;
	pthread_t intensityThread, colorThread;
for(int counter = 0; counter <1; counter++)	
{

	long totaltime = timestamp();
	IntensityImg = Get_Intensity_Image(inputImage);
	
	//long intTime = timestamp();
	//IntensityImage_GPU = Get_Intensity_Image_GPU(inputImage);
	pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptr);
	//pthread_create(&intensityThread, NULL, intensity_processing, (void *) ptrGPU);
	//pthread_join(intensityThread, NULL);
	//long intFinal = timestamp() - intTime;

	double maxInt;

	minMaxLoc(IntensityImg, NULL, &maxInt, NULL, NULL);
	// Normalize all color channels where the intensity is at least 10% of its maximum
	for (int i = 0; i < inputImage.rows; i++)		// rows
	{
		for (int j = 0; j < inputImage.cols; j++)	// cols
		{
			if (IntensityImg.at<uchar>(i, j) >= 0.1 * maxInt)
			{
				Vec3b &px = inputImage.at<Vec3b>(i, j);
				px[0] = saturate_cast<uchar>((px[0] * 255) / maxInt);	// b
				px[1] = saturate_cast<uchar>((px[1] * 255) / maxInt);	// g
				px[2] = saturate_cast<uchar>((px[2] * 255) / maxInt);	// r
			}
		}
	}


	ptr = &inputImage;
	//long colTime = timestamp();
	pthread_create(&colorThread, NULL, color_processing, (void *) ptr);
	//pthread_join(colorThread, NULL);
	//long colFinal = timestamp() - colTime;
	//cout << "Color Map Time: " << colFinal << "\n";
	
	//long orTime = timestamp();
         //Mat AggOr;
	 //Mat AggOr = getGaborImage(IntensityImg);
         Mat AggOr = getGaborImage();
	 normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
	//long orFinal = timestamp() - orTime;
	//cout << "Orientation Map Time: " << orFinal << "\n";

        pthread_join(intensityThread, NULL);
        pthread_join(colorThread, NULL);

	//gpu::GpuMat temp = AggIntGPU;
	
	finalImage = (AggInt + AggColor + AggOr) / 3;
	normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

	for (int bCtr = 0; bCtr < 4; bCtr++) {
		pyrUp(finalImage, finalImage);
	}
	
	long finaltime = timestamp() - totaltime;
	//cout << "Intensity Map Time: " << intFinal << "\n";
	//cout << "Color Map Time: " << colFinal << "\n";
	cout << "Total Time: " << finaltime << "\n";
}
	Mat contImg;
	inRange(finalImage, 160, 230, contImg);
	vector < vector<Point> > contours;
	vector < Vec4i > hierarchy;

	findContours(contImg, contours, hierarchy, CV_RETR_CCOMP,
			CV_CHAIN_APPROX_SIMPLE);
	for (int i = 0; i >= 0 && !hierarchy.empty(); i = hierarchy[i][0]) {	// guard against images with no contours
		Scalar color(rand() & 255, rand() & 255, rand() & 255);
		drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
	}

	imwrite("Salient_Image.jpg", inputImage);

	waitKey(0);
	return 0;
}
Example #27
int main(int argc, char** argv)
{
long totaltime, intTime, intTime_o, colTime, colTime_o , orTime, orTime_o;
if(argc != 2)
{
	cout << "No image"<<endl;
	return -1;
}
cout<<"Loading Image: ";
cout<< argv[1]<<endl;
Mat inputImage = imread(argv[1], CV_LOAD_IMAGE_COLOR);

if(!inputImage.data)
{
	cout <<"Invalid Image"<<endl;
	return -1;
}

Mat IntensityImg, finalImage;
for(int counter = 0; counter < 1; counter++)
{
totaltime = timestamp();

intTime = timestamp();
IntensityImg = Get_Intensity_Image(inputImage);
vector<Mat> Intensity_Maps = Pyr_CenSur(IntensityImg);
Mat AggInt = aggregateMaps(Intensity_Maps);
normalize(AggInt, AggInt, 0, 255, NORM_MINMAX, -1);
intTime_o = timestamp() - intTime;

colTime = timestamp();
vector<Mat> color_map;
color_map = Normalize_color(inputImage, IntensityImg);
vector<Mat> RGBYMap(6); 
for(int i = 0; i<6; i++)
addWeighted(color_map[i], 0.5, color_map[i+6], 0.5, 0, RGBYMap[i], -1);
Mat AggColor = aggregateMaps(RGBYMap);
normalize(AggColor, AggColor, 0, 255, NORM_MINMAX, -1);
colTime_o = timestamp() - colTime;

orTime = timestamp();
Mat AggOr;
AggOr = getGaborImage(IntensityImg);
normalize(AggOr, AggOr, 0, 255, NORM_MINMAX, -1);
orTime_o = timestamp() - orTime;

finalImage = (AggInt + AggColor + AggOr) /3;
normalize(finalImage, finalImage, 0, 255, NORM_MINMAX, -1);

for(int bCtr = 0; bCtr<4; bCtr++)
{
	pyrUp(finalImage, finalImage);
}

cout <<"Intensity Time: "<< (intTime_o) << "\n";
cout <<"Color Time: "<< (colTime_o) << "\n";
cout <<"Orientation Time: "<< (orTime_o) << "\n";
cout <<"Total Time: "<< (timestamp() - totaltime) << "\n";
}



Mat contImg;
inRange(finalImage, 160, 230, contImg);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;

findContours(contImg, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
for(int i = 0; i>=0 && !hierarchy.empty(); i =hierarchy[i][0])	// guard against images with no contours
{
	Scalar color(rand()&255, rand()&255, rand()&255);
	drawContours(inputImage, contours, i, color, 3, 8, hierarchy);
}

imwrite("Salient_Image.jpg" , inputImage);

waitKey(0);
return 0;
}
Example #28
vector<Mat> Normalize_color(Mat inputImage, Mat IntensityImg)
{

	double maxInt;

	minMaxLoc(IntensityImg, NULL, &maxInt, NULL, NULL);

// Normalize all color channels where the intensity is at least 10% of its maximum
for (int i = 0; i < inputImage.rows; i++)		// rows
{
	for (int j = 0; j < inputImage.cols; j++)	// cols
	{
		if (IntensityImg.at<uchar>(i, j) >= 0.1 * maxInt)
		{
			Vec3b &px = inputImage.at<Vec3b>(i, j);
			px[0] = saturate_cast<uchar>((px[0] * 255) / maxInt);	// b
			px[1] = saturate_cast<uchar>((px[1] * 255) / maxInt);	// g
			px[2] = saturate_cast<uchar>((px[2] * 255) / maxInt);	// r
		}
	}
}

// generate the broadly tuned R, G, B and Y color channels
vector<Mat> rgby(4);
vector<Mat> temp;
split(inputImage, temp);

rgby[0] = temp[0] - (temp[1] + temp[2])/2;
rgby[1] = temp[1] - (temp[0] + temp[2])/2;
rgby[2] = temp[2] - (temp[1] + temp[0])/2;
rgby[3] = (temp[2] + temp[1])/2 - abs((temp[2] - temp[1])/2) - temp[0];

threshold(rgby[3], rgby[3], 0, 255, THRESH_TOZERO);
vector<Mat> red_pyr(9);
vector<Mat> green_pyr(9);
vector<Mat> blue_pyr(9);
vector<Mat> yellow_pyr(9);

red_pyr = Get_GaussianPyramid(rgby[2]);
green_pyr = Get_GaussianPyramid(rgby[1]);
blue_pyr = Get_GaussianPyramid(rgby[0]);
yellow_pyr = Get_GaussianPyramid(rgby[3]);

Mat r_g, b_y, output_rg, output_by;
vector<Mat> color_map(12);

for(int c = 2; c<=4; c++)
{
	for(int delta = 3; delta<=4; delta++)
	{
		r_g = green_pyr[c+delta] - red_pyr[c+delta];
		b_y = yellow_pyr[c+delta] - blue_pyr[c+delta];
		for(int bCtr =1; bCtr<=delta; bCtr++)
		{
			pyrUp(r_g, output_rg, Size(r_g.cols*2, r_g.rows*2));
			r_g = output_rg;
			pyrUp(b_y, output_by, Size(b_y.cols*2, b_y.rows*2));
			b_y = output_by;
		}
		if(red_pyr[c].size() != output_rg.size())
		{
			color_map[2*c+delta-7] = abs((red_pyr[c] - green_pyr[c]) - output_rg(Range(0, red_pyr[c].rows), Range(0, red_pyr[c].cols)));
color_map[2*c+delta-1] = abs((blue_pyr[c] - yellow_pyr[c]) - output_by(Range(0, red_pyr[c].rows), Range(0, red_pyr[c].cols)));
		}
		else
		{
			color_map[2*c+delta-7] = abs((red_pyr[c] - green_pyr[c]) - output_rg);
			color_map[2*c+delta-1] = abs((blue_pyr[c] - yellow_pyr[c]) - output_by);
		}
	}
}

return color_map;
}