// liteMask holds the mask data from before the edge blur (currently unused in
// the live code path; earlier variants gated the alpha copy on it).
void CutoutImage::translucentEdge(const cv::Mat srcMat, const cv::Mat smoothMask, const cv::Mat liteMask, cv::Mat &dstMat)
{
    cv::Mat smoothMask8uc1;
    smoothMask.convertTo(smoothMask8uc1, CV_8UC1, 255.0);
    const int rows = smoothMask8uc1.rows;
    const int cols = smoothMask8uc1.cols;

    cv::Mat srcMatClone = srcMat.clone();
    cv::cvtColor(srcMatClone, srcMatClone, CV_BGR2BGRA);

    // Write the smoothed mask into the alpha channel so the cutout edge
    // fades out gradually instead of ending abruptly.
    for (int y = 0; y < rows; y++)
    {
        const uchar *smoothMask8uc1RowData = smoothMask8uc1.ptr<uchar>(y);
        uchar *srcMatCloneRowData = srcMatClone.ptr<uchar>(y);
        for (int x = 0; x < cols; x++)
        {
            srcMatCloneRowData[x * 4 + 3] = smoothMask8uc1RowData[x];
        }
    }
    dstMat = srcMatClone.clone();
}
void AutoCorr::autocorrDFT(const cv::Mat &img, cv::Mat &dst)
{
    // Convert image from unsigned char to float matrix
    cv::Mat fImg;
    img.convertTo(fImg, CV_32FC1);

    // Subtract the mean
    cv::Mat mean(fImg.size(), fImg.type(), cv::mean(fImg));
    cv::subtract(fImg, mean, fImg);

    // Calculate the optimal size for the DFT output. Padding to this size
    // increases speed and avoids circular wrap-around in the correlation.
    cv::Size dftSize;
    dftSize.width = cv::getOptimalDFTSize(2 * img.cols + 1);
    dftSize.height = cv::getOptimalDFTSize(2 * img.rows + 1);

    // Prepare the zero-padded destination and copy the image into its
    // top-left corner. (Without this copy, cv::dft would simply reallocate
    // dst to the image size and the padding prepared above would have no
    // effect.)
    dst = cv::Mat(dftSize, CV_32FC1, cv::Scalar::all(0));
    fImg.copyTo(dst(cv::Rect(0, 0, fImg.cols, fImg.rows)));

    // Transform the padded image into the frequency domain
    cv::dft(dst, dst);

    // Multiply the spectrum by its own complex conjugate.
    // (mulSpectrums only honors DFT_ROWS in its flags argument, so the
    // DFT_INVERSE flag passed here is ignored.)
    cv::mulSpectrums(dst, dst, dst, cv::DFT_INVERSE, true);

    // Transform the result back to the image domain
    cv::dft(dst, dst, cv::DFT_INVERSE | cv::DFT_SCALE);

    // Normalize the result by the total signal energy
    cv::multiply(fImg, fImg, fImg);
    float denom = cv::sum(fImg)[0];
    dst = dst * (1 / denom);
}
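// A minimal usage sketch, not part of the original source: compute the
// autocorrelation of a grayscale patch and locate its strongest off-origin
// peak (useful, e.g., for estimating the period of a repeating texture).
// AutoCorr is assumed here to be default-constructible.
void autocorrDemo(const cv::Mat &gray8u)
{
    AutoCorr ac;
    cv::Mat acorr;
    ac.autocorrDFT(gray8u, acorr);
    // The peak at (0,0) is the signal's correlation with itself; mask it
    // out before searching for the next-strongest peak.
    acorr.at<float>(0, 0) = 0.0f;
    cv::Point peak;
    cv::minMaxLoc(acorr, nullptr, nullptr, nullptr, &peak);
}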
Array<float> CvMatToOpOutput::createArray(const cv::Mat& cvInputData,
                                          const double scaleInputToOutput,
                                          const Point<int>& outputResolution) const
{
    try
    {
        // Sanity checks
        if (cvInputData.empty())
            error("Wrong input element (empty cvInputData).", __LINE__, __FUNCTION__, __FILE__);
        if (cvInputData.channels() != 3)
            error("Input images must be 3-channel BGR.", __LINE__, __FUNCTION__, __FILE__);
        if (cvInputData.cols <= 0 || cvInputData.rows <= 0)
            error("Input image has 0 area.", __LINE__, __FUNCTION__, __FILE__);
        if (outputResolution.x <= 0 || outputResolution.y <= 0)
            error("Output resolution has 0 area.", __LINE__, __FUNCTION__, __FILE__);
        // outputData - rescale keeping the aspect ratio and convert the output image to float
        const cv::Mat frameWithOutputSize = resizeFixedAspectRatio(cvInputData, scaleInputToOutput, outputResolution);
        Array<float> outputData({outputResolution.y, outputResolution.x, 3});
        frameWithOutputSize.convertTo(outputData.getCvMat(), CV_32FC3);
        // Return result
        return outputData;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return Array<float>{};
    }
}
void niblackThreshold(const cv::Mat& src, cv::Mat& dst, int windowSize, int c, float k)
{
    cv::Mat meanMat, std_dev, srcf;
    src.convertTo(srcf, CV_32FC1);
#ifdef USE_OPENCV
    // Local mean and standard deviation via box filters: var = E[x^2] - (E[x])^2
    cv::Mat mean2;
    cv::Size window(windowSize, windowSize);
    cv::blur(srcf, meanMat, window);
    cv::blur(srcf.mul(srcf), mean2, window);
    cv::sqrt(mean2 - meanMat.mul(meanMat), std_dev);
#else
    meanWithDeviation(srcf, meanMat, std_dev, windowSize);
#endif
    dst = cv::Mat(src.size(), src.type());
    for (int j = 0; j < dst.rows; ++j)
    {
        for (int i = 0; i < dst.cols; ++i)
        {
            // pixel = (pixel > mean + k * standard_deviation - c) ? object : background
            dst.at<uchar>(j, i) = (srcf.at<float>(j, i) >
                meanMat.at<float>(j, i) + k * std_dev.at<float>(j, i) - c) ? 255 : 0;
        }
    }
}
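// A minimal usage sketch, not part of the original source: binarize a
// grayscale document scan with a typical Niblack parameterization. The file
// name and parameter values here are illustrative assumptions.
void niblackDemo()
{
    cv::Mat src = cv::imread("page.png", cv::IMREAD_GRAYSCALE); // hypothetical input
    cv::Mat bin;
    niblackThreshold(src, bin, 25 /*windowSize*/, 0 /*c*/, -0.2f /*k*/);
    cv::imwrite("page_niblack.png", bin);
}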
void show_depth(const cv::Mat& depth)
{
    cv::Mat display;
    //cv::normalize(depth, display, 0, 255, cv::NORM_MINMAX, CV_8U);
    // Fixed scaling: maps depth values in [0, 4000] onto [0, 255]
    // (assuming depth in millimetres with a ~4 m sensor range).
    depth.convertTo(display, CV_8U, 255.0 / 4000);
    cv::imshow("Depth", display);
}
int EMVisi2::setModel(const cv::Mat im1, const cv::Mat mask)
{
    if (proba.empty())
    {
        dL         = cv::Mat(im1.size(), CV_32FC1);
        ncc        = cv::Mat(im1.size(), CV_32FC1);
        sum        = cv::Mat(im1.size(), CV_32FC1);
        proba      = cv::Mat(im1.size(), CV_32FC1);
        visi_proba = cv::Mat(im1.size(), CV_32FC1);
        nccproba_v = cv::Mat(im1.size(), CV_32FC1);
        nccproba_h = cv::Mat(im1.size(), CV_32FC1);
        ratio      = cv::Mat(im1.size(), CV_32FC(im1.channels()));
        im1f       = cv::Mat(im1.size(), CV_32FC(im1.channels()));
    }
    if (im1.channels() > 1)
    {
        cv::Mat gray;
        cv::cvtColor(im1, gray, cv::COLOR_RGB2GRAY);
        fncc.setModel(gray, mask);
    }
    else
    {
        fncc.setModel(im1, mask);
    }
    im1.convertTo(im1f, im1f.type());
    this->mask = mask;
    return 0;
}
void DrawHistogram(const cv::Mat& histogram, cv::Mat& draw_img, int width)
{
    double minval, maxval;
    cv::minMaxLoc(histogram, &minval, &maxval);

    // Scale the bins so the largest one spans the full drawing width.
    cv::Mat norm_hist;
    histogram.convertTo(norm_hist, CV_32FC1, (double)width / maxval);

    if (histogram.cols == 1)
    {
        // Column histogram: draw horizontal bars, one per row.
        draw_img = cv::Mat::zeros(histogram.rows, width, CV_8UC1);
        for (int r = 0; r < histogram.rows; r++)
            for (int c = 0; c < norm_hist.at<float>(r, 0); c++)
                draw_img.at<unsigned char>(r, c) = 255;
    }
    else if (histogram.rows == 1)
    {
        // Row histogram: draw vertical bars growing up from the bottom.
        draw_img = cv::Mat::zeros(width, histogram.cols, CV_8UC1);
        for (int c = 0; c < histogram.cols; c++)
            for (int r = 0; r < norm_hist.at<float>(0, c); r++)
                draw_img.at<unsigned char>(width - r - 1, c) = 255;
    }
}
void drwnNNGraphImage::appendNodeFeatures(const drwnNNGraphImageData& image, const cv::Mat& features)
{
    DRWN_ASSERT(((int)image.width() == features.cols) && ((int)image.height() == features.rows));
    DRWN_ASSERT(image.numSegments() == this->numNodes());

    // convert to 32-bit floating point (if not already)
    if (features.depth() != CV_32F) {
        cv::Mat tmp;
        features.convertTo(tmp, CV_32F, 1.0, 0.0);
        return appendNodeFeatures(image, tmp);
    }

    // compute mean pixel feature over each superpixel
    vector<float> phi(image.numSegments(), 0.0f);
    for (unsigned y = 0; y < image.height(); y++) {
        for (unsigned x = 0; x < image.width(); x++) {
            const float p = features.at<float>(y, x);
            for (int c = 0; c < image.segments().channels(); c++) {
                const int segId = image.segments()[c].at<int>(y, x);
                if (segId < 0) continue;
                phi[segId] += p;
            }
        }
    }

    // append the per-superpixel mean as one extra feature dimension
    for (unsigned segId = 0; segId < phi.size(); segId++) {
        DRWN_ASSERT(isfinite(phi[segId]));
        VectorXf newFeatures(_nodes[segId].features.rows() + 1);
        newFeatures.head(_nodes[segId].features.rows()) = _nodes[segId].features;
        newFeatures[_nodes[segId].features.rows()] = phi[segId] / (float)image.segments().pixels(segId);
        _nodes[segId].features = newFeatures;
    }
}
void ProcessDepth::setRef(cv::Mat image, bool improve)
{
    if (improve)
    {
        // Replace every row with its median depth value to suppress
        // per-pixel noise in the reference frame.
        for (int imgY = 0; imgY < image.rows; imgY++)
        {
            cv::Mat lineOfImage = image.row(imgY);
            std::vector<u_int16_t> tempSort;
            cv::MatIterator_<u_int16_t> it, end;
            for (it = lineOfImage.begin<u_int16_t>(), end = lineOfImage.end<u_int16_t>(); it != end; ++it)
            {
                tempSort.push_back(*it);
            }
            std::nth_element(tempSort.begin(), tempSort.begin() + tempSort.size() / 2, tempSort.end());
            u_int16_t median = tempSort[tempSort.size() / 2];
            cv::line(image, cv::Point2i(0, imgY), cv::Point2i(image.cols, imgY), median, 1, 8);
        }
        image.convertTo(mRefImage, CV_32FC1);
    }
    else
    {
        mRefImage = image;
    }
}
void RunningBackground::update(cv::Mat frame, cv::Mat& thresholded)
{
    if (needToReset || accumulator.empty())
    {
        needToReset = false;
        frame.convertTo(accumulator, CV_32F);
    }
    accumulator.convertTo(background, CV_8U);
    switch (differenceMode)
    {
        case ABSDIFF: cv::absdiff(background, frame, foreground); break;
        case BRIGHTER: cv::subtract(frame, background, foreground); break;
        case DARKER: cv::subtract(background, frame, foreground); break;
    }
    ofxCv::copyGray(foreground, foregroundGray);
    int thresholdMode = ignoreForeground ? cv::THRESH_BINARY_INV : cv::THRESH_BINARY;
    cv::threshold(foregroundGray, thresholded, thresholdValue, 255, thresholdMode);
    float curLearningRate = learningRate;
    if (useLearningTime)
    {
        curLearningRate = 1. - powf(1. - (thresholdValue / 255.), 1. / learningTime);
    }
    if (ignoreForeground)
    {
        cv::accumulateWeighted(frame, accumulator, curLearningRate, thresholded);
        cv::bitwise_not(thresholded, thresholded);
    }
    else
    {
        cv::accumulateWeighted(frame, accumulator, curLearningRate);
    }
}
void DepthSampler::sample(cv::Mat source, std::vector<cv::Mat> layers, std::vector<cv::Point2i>& out)
{
    cv::Mat localSource;
    std::vector<cv::Mat> tempLayers;
    source.convertTo(localSource, CV_32FC3);
    cv::split(localSource, tempLayers);

    // Combine two channels into a single depth value (high * 256 + low).
    cv::Mat dM = tempLayers[1] * 256 + tempLayers[2];

    out.resize(0);
    int nLayers = layers.size();
    float localStep = minInd + (step * (double)nLayers);
    for (std::vector<cv::Mat>::iterator it = layers.begin(); it != layers.end(); it++)
    {
        for (int xIm = 0; xIm < source.cols; xIm += (int)localStep)
        {
            for (int yIm = 0; yIm < source.rows; yIm += (int)localStep)
            {
                if ((int)it->at<char>(yIm, xIm) != 0)
                {
                    float d = dM.at<float>(yIm, xIm);
                    if ((d != 0) && (d < maxDistance))
                    {
                        out.push_back(cv::Point2i(xIm, yIm));
                    }
                }
            }
        }
        // Sample successive layers more densely.
        localStep = localStep - step;
    }
}
void ControllerImageFusion::correctBritness(cv::Mat& image, cv::Mat source)
{
    // Divide in floating point: integer division would always yield a whole
    // number and defeat the std::modf integrality check below.
    float colRatioDb = (float)image.cols / (float)source.cols;
    float rowRatioDb = (float)image.rows / (float)source.rows;
    float val;
    if (std::modf(colRatioDb, &val) != 0 || std::modf(rowRatioDb, &val) != 0)
    {
        return;
    }
    int colRatio = (int)colRatioDb;
    int rowRatio = (int)rowRatioDb;
    if (colRatio <= 1 || rowRatio <= 1)
    {
        return;
    }
    cv::Mat sourceConverted;
    source.convertTo(sourceConverted, CV_32F);
    int rows = source.rows;
    int cols = source.cols;
    // Shift each block of the upscaled image so its mean brightness matches
    // the corresponding source pixel.
    for (int i = 0; i < rows - 1; i++)
    {
        const float* Mi = sourceConverted.ptr<float>(i);
        for (int j = 0; j < cols - 1; j++)
        {
            cv::Mat temp = cv::Mat(image, cv::Rect(j * colRatio, i * rowRatio, colRatio, rowRatio));
            cv::Scalar mean = cv::mean(temp);
            mean.val[0] = Mi[j] - mean.val[0];
            cv::add(temp, mean, temp);
        }
    }
}
bool task1(const cv::Mat& image)
{
    cv::Mat manual, buildIn, diff;
    std::vector<cv::Mat> channels;
    scaleImage(image, manual, 2, 100);
    image.convertTo(buildIn, -1, 2, 100);
    cv::absdiff(manual, buildIn, diff);
    cv::split(diff, channels);
    std::cout << "Max difference element: ";
    for (VMit it = channels.begin(); it != channels.end(); ++it)
    {
        int max = *(std::max_element((*it).begin<uchar>(), (*it).end<uchar>()));
        std::cout << max << " ";
    }
    std::cout << std::endl;

    std::vector<cv::Mat> scales(5, cv::Mat());
    std::vector<cv::Mat> dst(2, cv::Mat());
    for (int i = 0; i < task1c; ++i)
    {
        scaleImage(image, scales[i], task1v[2 * i], task1v[2 * i + 1]);
    }
    concatImages(scales[0], scales[1], dst[0]);
    concatChannels(dst[0], dst[0]);
    concatImages(scales[2], scales[3], dst[1]);
    concatImages(dst[1], scales[4], dst[1]);
    concatChannels(dst[1], dst[1]);
    return cv::imwrite(PATH + "Task1Lena01.jpg", dst[0])
        && cv::imwrite(PATH + "Task1Lena345.jpg", dst[1]);
}
void performHighPass(const cv::Mat& image, cv::Mat& res, int rad)
{
    cv::Mat grey, tmp;
    cv::cvtColor(image, grey, CV_BGR2GRAY);
    grey.convertTo(grey, CV_32F);
    grey.copyTo(res);
    res.convertTo(res, CV_8U);

    // Forward DFT, then split into magnitude and phase.
    std::vector<cv::Mat> planes(2, cv::Mat());
    std::vector<cv::Mat> polar(2, cv::Mat());
    cv::dft(grey, tmp, cv::DFT_COMPLEX_OUTPUT);
    cv::split(tmp, planes);
    cv::cartToPolar(planes[0], planes[1], polar[0], polar[1]);
    visualization(polar[0], tmp);
    concatImages(res, tmp, res);

    // Zero out low frequencies around the (centered) spectrum origin.
    rearrangeQuadrants(polar[0]);
    highPassFilter(polar[0], rad);
    rearrangeQuadrants(polar[0]);
    visualization(polar[0], tmp);
    tmp.convertTo(tmp, res.type());
    concatImages(res, tmp, res);

    // Recombine magnitude/phase and transform back to the spatial domain.
    cv::polarToCart(polar[0], polar[1], planes[0], planes[1]);
    cv::merge(planes, tmp);
    cv::dft(tmp, tmp, cv::DFT_SCALE | cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
    tmp.convertTo(tmp, CV_8U);
    concatImages(res, tmp, res);
}
cv::Mat rescaleImageIntensity(const cv::Mat& img, ScaleType type)
{
    cv::Mat rval;
    if (type == ScaleNone)
    {
        if (img.depth() == CV_8U)
        {
            rval = img.clone();
        }
        else
        {
            double fmin, fmax;
            cv::minMaxLoc(img, &fmin, &fmax);
            if (fmax - fmin <= 1)
            {
                // Values look normalized to [0, 1]: stretch to [0, 255].
                cv::convertScaleAbs(img, rval, 255);
            }
            else
            {
                img.convertTo(rval, CV_8U);
            }
        }
    }
    else
    {
        cv::Mat fsrc;
        if (img.depth() != at::REAL_IMAGE_TYPE)
        {
            img.convertTo(fsrc, at::REAL_IMAGE_TYPE);
        }
        else
        {
            fsrc = img;
        }
        cv::Mat tmp;
        if (type == ScaleMinMax)
        {
            double fmin, fmax;
            cv::minMaxLoc(fsrc, &fmin, &fmax);
            tmp = 255 * ((fsrc - fmin) / (fmax - fmin));
        }
        else
        {
            // Symmetric scaling around 127 using the max absolute value.
            at::real fmag = cv::norm(fsrc, cv::NORM_INF);
            tmp = 127 + 127 * fsrc / fmag;
        }
        tmp.convertTo(rval, CV_8U);
    }
    return rval;
}
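// A minimal usage sketch, not part of the original source: display a float
// image by stretching its range to 8 bits. ScaleMinMax comes from the
// function above; the window name is illustrative.
void showFloatImage(const cv::Mat& floatImg)
{
    cv::Mat vis = rescaleImageIntensity(floatImg, ScaleMinMax);
    cv::imshow("rescaled", vis);
    cv::waitKey(0);
}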
void WatershedSegmenter::segmented_image(cv::Mat& dest)
{
    cv::Mat wshedMask = process();
    cv::Mat mask;
    convertScaleAbs(wshedMask, mask, 1, 0);
    // Any labelled pixel (value >= 1) becomes part of the mask.
    threshold(mask, mask, 1, 255, THRESH_BINARY);
    bitwise_and(image, image, dest, mask);
    dest.convertTo(dest, CV_8U);
}
void visualization(const cv::Mat& magnitude, cv::Mat& res)
{
    res = magnitude + cv::Scalar::all(1);
    // switch to logarithmic scale
    cv::log(res, res);
    cv::normalize(res, res, 0, 1, CV_MINMAX);
    res *= 255;
    res.convertTo(res, CV_8U);
}
cv::Mat colorizeDepth(const cv::Mat& dMap, float min, float max)
{
    cv::Mat d8Bit, dColor;
    // Map [min, max] onto [0, 255]; the offset term keeps `min` from being
    // silently ignored.
    dMap.convertTo(d8Bit, CV_8UC1, 255. / (max - min), -255. * min / (max - min));
    cv::applyColorMap(d8Bit, dColor, cv::COLORMAP_JET);
    return dColor;
}
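// A minimal usage sketch, not part of the original source: colorize a 16-bit
// depth frame, assuming depth values in millimetres within roughly [500, 4000].
void showColorizedDepth(const cv::Mat& depth16)
{
    cv::Mat vis = colorizeDepth(depth16, 500.f, 4000.f);
    cv::imshow("depth (JET)", vis);
    cv::waitKey(1);
}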
void showResult(cv::Mat initial, cv::Mat result)
{
    initial = initial.clone();
    result = result.clone();
    initial.convertTo(initial, CV_8UC1);
    result.convertTo(result, CV_8UC1);
    // Resize both to a 400-pixel-wide preview, preserving the aspect ratio.
    float ratio = initial.rows / (float)initial.cols;
    cv::resize(initial, initial, cv::Size(400, static_cast<int>(400.f * ratio)), 0, 0, CV_INTER_NN);
    cv::resize(result, result, cv::Size(400, static_cast<int>(400.f * ratio)), 0, 0, CV_INTER_NN);
    cv::imshow("input", initial);
    cv::imshow("output", result);
    cv::waitKey();
}
void LetterClassifier::LoadImages(std::vector< std::vector< std::string > > &file_names,
                                  cv::Mat &dataset, cv::Mat &labels,
                                  cv::Mat &test_dataset, cv::Mat &test_labels)
{
    // Split each class's files into train and test sets according to
    // train_test_ratio, capping training samples at max_samples_class.
    int num_train_data = 0, num_test_data = 0;
    std::vector< int > num_data;
    for (size_t i = 0; i < file_names.size(); i++)
    {
        int num_data_per_class = (int)(file_names[i].size() / (params.train_test_ratio + 1)) * params.train_test_ratio;
        num_data_per_class = (num_data_per_class > params.max_samples_class) ? params.max_samples_class : num_data_per_class;
        num_data.push_back(num_data_per_class);
        num_test_data += ((int)file_names[i].size() - num_data_per_class);
        num_train_data += num_data_per_class;
    }
    std::cout << "Number of TEST data: " << num_test_data << std::endl;
    std::cout << "Number of TRAIN data: " << num_train_data << std::endl;

    // One flattened image per column; one label per row.
    int k = 0, l = 0;
    dataset = cv::Mat(cv::Size(num_train_data, params.letter_size.area()), CV_32F);
    labels = cv::Mat(cv::Size(1, num_train_data), CV_32F);
    test_dataset = cv::Mat(cv::Size(num_test_data, params.letter_size.area()), CV_32F);
    test_labels = cv::Mat(cv::Size(1, num_test_data), CV_32F);
    for (size_t i = 0; i < file_names.size(); i++)
    {
        cv::Mat img;
        for (int j = 0; j < (int)file_names[i].size(); j++)
        {
            img = cv::imread(file_names[i][j], CV_LOAD_IMAGE_GRAYSCALE);
            cv::resize(img, img, params.letter_size);
            img.convertTo(img, CV_32F);
            img = img.reshape(1, params.letter_size.area());
            if (j < num_data[i])
            {
                img.copyTo(dataset.col(k));
                labels.at<float>(k) = (float)i;
                k++;
            }
            else
            {
                img.copyTo(test_dataset.col(l));
                test_labels.at<float>(l) = (float)i;
                l++;
            }
        }
    }
    labels.convertTo(labels, CV_32S);
    test_labels.convertTo(test_labels, CV_32S);
}
static void makeDepth32f(cv::Mat& source, cv::Mat& output)
{
    if (source.depth() != CV_32F)
        source.convertTo(output, CV_32F);
    else
        output = source;
}
/* Expects an 8-bit image, where the region of interest is 255 */
void Auvsi_Radon::setImage(cv::Mat inputImage)
{
    cv::Mat rInFloat(inputImage.size(), CV_32F);
    inputImage.convertTo(rInFloat, rInFloat.type(), 1.0 / 255.0f);
    image = rInFloat;
}
// Back-projects image points onto the calibration-board plane.
// Thanks Alvaro & Niklas!
bool CameraCalibration::backProject(const cv::Mat& boardRot64,
                                    const cv::Mat& boardTrans64,
                                    const vector<cv::Point2f>& imgPt,
                                    vector<cv::Point3f>& worldPt)
{
    if (imgPt.size() == 0)
    {
        return false;
    }

    // Homogeneous image coordinates, one column per point.
    cv::Mat imgPt_h = cv::Mat::zeros(3, imgPt.size(), CV_32F);
    for (int h = 0; h < (int)imgPt.size(); ++h)
    {
        imgPt_h.at<float>(0, h) = imgPt[h].x;
        imgPt_h.at<float>(1, h) = imgPt[h].y;
        imgPt_h.at<float>(2, h) = 1.0f;
    }
    Mat Kinv64 = getUndistortedIntrinsics().getCameraMatrix().inv();
    Mat Kinv, boardRot, boardTrans;
    Kinv64.convertTo(Kinv, CV_32F);
    boardRot64.convertTo(boardRot, CV_32F);
    boardTrans64.convertTo(boardTrans, CV_32F);

    // Transform all image points to world points in the camera reference
    // frame, and then into the board-plane reference frame.
    Mat rot3x3;
    Rodrigues(boardRot, rot3x3);
    Mat transPlaneToCam = rot3x3.inv() * boardTrans;
    for (int i = 0; i < (int)imgPt.size(); ++i)
    {
        Mat col = imgPt_h.col(i);
        Mat worldPtcam = Kinv * col;
        Mat worldPtPlane = rot3x3.inv() * worldPtcam;
        // Scale the ray so it intersects the z = 0 plane of the board.
        float scale = transPlaneToCam.at<float>(2) / worldPtPlane.at<float>(2);
        Mat worldPtPlaneReproject = scale * worldPtPlane - transPlaneToCam;
        cv::Point3f pt;
        pt.x = worldPtPlaneReproject.at<float>(0);
        pt.y = worldPtPlaneReproject.at<float>(1);
        pt.z = 0;
        worldPt.push_back(pt);
    }
    return true;
}
void CameraParameters::setParams(cv::Mat cameraMatrix, cv::Mat distorsionCoeff, cv::Size size)
{
    if (cameraMatrix.rows != 3 || cameraMatrix.cols != 3)
        throw cv::Exception(9000, "invalid input cameraMatrix", "CameraParameters::setParams", __FILE__, __LINE__);
    cameraMatrix.convertTo(CameraMatrix, CV_32FC1);
    if (distorsionCoeff.total() < 4 || distorsionCoeff.total() > 5)
        throw cv::Exception(9000, "invalid input distorsionCoeff", "CameraParameters::setParams", __FILE__, __LINE__);
    cv::Mat auxD;
    distorsionCoeff.convertTo(auxD, CV_32FC1);
    // now, keep only the first 4 distortion coefficients
    Distorsion.create(1, 4, CV_32FC1);
    for (int i = 0; i < 4; i++)
        Distorsion.ptr<float>(0)[i] = auxD.ptr<float>(0)[i];
    CamSize = size;
}
void AdaptiveBackgroundLearning::process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel)
{
    if (img_input.empty())
        return;

    loadConfig();
    if (firstTime)
        saveConfig();

    if (img_background.empty())
        img_input.copyTo(img_background);

    cv::Mat img_input_f(img_input.size(), CV_32F);
    img_input.convertTo(img_input_f, CV_32F, 1. / 255.);

    cv::Mat img_background_f(img_background.size(), CV_32F);
    img_background.convertTo(img_background_f, CV_32F, 1. / 255.);

    cv::Mat img_diff_f(img_input.size(), CV_32F);
    cv::absdiff(img_input_f, img_background_f, img_diff_f);

    // Blend the new frame into the background model for the first `limit`
    // frames (limit == -1 means adapt forever). The original comparison ran
    // the wrong way around (limit < counter), which could never trigger.
    if ((limit > 0 && counter < limit) || limit == -1)
    {
        img_background_f = alpha * img_input_f + (1 - alpha) * img_background_f;
        cv::Mat img_new_background(img_input.size(), CV_8U);
        img_background_f.convertTo(img_new_background, CV_8U, 255.0 / (maxVal - minVal), -minVal);
        img_new_background.copyTo(img_background);
        if (limit > 0 && counter < limit)
            counter++;
    }

    cv::Mat img_foreground(img_input.size(), CV_8U);
    img_diff_f.convertTo(img_foreground, CV_8U, 255.0 / (maxVal - minVal), -minVal);

    if (img_foreground.channels() == 3)
        cv::cvtColor(img_foreground, img_foreground, CV_BGR2GRAY);

    if (enableThreshold)
        cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);

    if (showForeground)
        cv::imshow("A-Learning FG", img_foreground);
    if (showBackground)
        cv::imshow("A-Learning BG", img_background);

    img_foreground.copyTo(img_output);
    img_background.copyTo(img_bgmodel);

    firstTime = false;
}
cv::Mat getDepthDrawableImage(cv::Mat depth_image)
{
    cv::Mat drawable;
    // Scale assumes depth in millimetres with a ~1 m working range; the
    // inversion below makes nearer pixels appear brighter.
    depth_image.convertTo(drawable, CV_8UC1, 255.0 / 1000);
    drawable = 255 - drawable;
    return drawable;
}
LocalBinaryPattern::LocalBinaryPattern(const cv::Mat& img)
    : RFeatures::FeatureOperator(img.size()), _imgRct(0, 0, img.cols, img.rows)
{
    assert(img.channels() == 1);
    assert(img.isContinuous());
    cv::Mat iimg;
    img.convertTo(iimg, CV_32F);
    cv::integral(iimg, _intImg, CV_64F);
}   // end ctor
//_______________________________________________
double FM::LaplacianEnergy::measure_focus(cv::Mat image) const
{
    assert(check_image(image));
    image.convertTo(image, CV_8UC1);
    // Energy of the Laplacian: sum of squared second derivatives.
    cv::Mat processed;
    cv::Laplacian(image, processed, CV_8U);
    cv::pow(processed, 2, processed);
    return cv::sum(processed)[0];
}
void rotateFlowCloud(cv::Mat &out_flow, const cv::Mat &in_flow, const cv::Mat &transform_mat)
{
    // Flatten to one point per row, then apply the transform.
    Mat flow_reshaped = in_flow.reshape(1, in_flow.rows * in_flow.cols);
    out_flow = transform_mat * flow_reshaped.t();
    out_flow.convertTo(out_flow, CV_32FC1);
}
static cv::Mat convertTo(const cv::Mat &mat, int depth)
{
    if (mat.depth() == depth)
        return mat;
    cv::Mat result;
    mat.convertTo(result, depth);
    return result;
}
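// A minimal usage sketch, not part of the original source: normalize inputs
// to float without copying matrices that already have the right depth.
// cv::Mat f = convertTo(img8u, CV_32F);   // converted copy
// cv::Mat same = convertTo(f, CV_32F);    // shares data with f, no conversion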