virtual void run(InputArrayOfArrays _points2d)
{
    // Run the libmv-based SfM reconstruction over a set of per-view 2D point
    // matrices. Results are stored in the member libmv_reconstruction_.
    // NOTE(review): the >= 2 views assertion fires after getMatVector(), so
    // the vector copy is wasted on the failure path (harmless otherwise).
    std::vector<Mat> points2d;
    _points2d.getMatVector(points2d);
    CV_Assert( _points2d.total() >= 2 );

    // Parse 2d points to Tracks
    Tracks tracks;
    parser_2D_tracks(points2d, tracks);

    // Set libmv logs level
    libmv_initLogging("");
    if (libmv_reconstruction_options_.verbosity_level >= 0)
    {
        libmv_startDebugLogging();
        libmv_setLoggingVerbosity( libmv_reconstruction_options_.verbosity_level);
    }

    // Perform reconstruction (dereferences the returned pointer to copy the
    // result into the member — presumably the pointee is owned elsewhere;
    // TODO confirm no leak/dangling in libmv_solveReconstruction's contract)
    libmv_reconstruction_ = *libmv_solveReconstruction(tracks, &libmv_camera_intrinsics_options_, &libmv_reconstruction_options_);
}
void Feature2D::compute( InputArrayOfArrays _images, std::vector<std::vector<KeyPoint> >& keypoints, OutputArrayOfArrays _descriptors )
{
    CV_INSTRUMENT_REGION();

    // Batch form: compute one descriptor Mat per (image, keypoint set) pair.
    if( !_descriptors.needed() )
        return;

    vector<Mat> images;
    _images.getMatVector(images);

    size_t i, nimages = images.size();
    CV_Assert( keypoints.size() == nimages );

    // The output must wrap a std::vector<Mat>: getObj() hands back the
    // caller's actual vector so descriptors are written through directly
    // (a getMatVector() copy would not propagate newly allocated Mats).
    CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );

    vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
    descriptors.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        compute(images[i], keypoints[i], descriptors[i]);
    }
}
void ACFFeatureEvaluatorImpl::setChannels(InputArrayOfArrays channels)
{
    // Aggregate each input channel by summing disjoint 4x4 pixel cells,
    // producing integer channels at quarter resolution (ACF-style pooling).
    channels_.clear();
    vector<Mat> ch;
    channels.getMatVector(ch);
    for( size_t i = 0; i < ch.size(); ++i )
    {
        const Mat &channel = ch[i];
        // NOTE(review): assumes channel.rows and channel.cols are multiples
        // of 4; otherwise the cell loops below read past the matrix bounds
        // and the (row / 4) write exceeds acf_channel — confirm callers
        // guarantee this.
        Mat_<int> acf_channel = Mat_<int>::zeros(channel.rows / 4, channel.cols / 4);
        for( int row = 0; row < channel.rows; row += 4 )
        {
            for( int col = 0; col < channel.cols; col += 4 )
            {
                // sum of the 4x4 cell, truncating each float sample to int
                int sum = 0;
                for( int cell_row = row; cell_row < row + 4; ++cell_row )
                    for( int cell_col = col; cell_col < col + 4; ++cell_col )
                        sum += (int)channel.at<float>(cell_row, cell_col);
                acf_channel(row / 4, col / 4) = sum;
            }
        }
        channels_.push_back(acf_channel.clone());
    }
}
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
{
    // Robertson response calibration: iteratively refine the camera response
    // curve (LDR_SIZE x 1) by alternating HDR merging with a response update,
    // stopping when the change drops below `threshold` or after `max_iter`.
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(LDR_SIZE, 1, CV_32FCC);
    Mat response = dst.getMat();
    // initial guess: normalized linear response
    response = linearResponse(3) / (LDR_SIZE / 2.0f);

    // card[v][c] counts how many pixels have value v in channel c.
    // NOTE(review): Vec3f indexing here (and below) hard-codes 3 channels
    // even though the Mats are created with `channels` planes — confirm
    // callers only ever pass 3-channel images.
    Mat card = Mat::zeros(LDR_SIZE, 1, CV_32FCC);
    for(size_t i = 0; i < images.size(); i++)
    {
        uchar *ptr = images[i].ptr();
        for(size_t pos = 0; pos < images[i].total(); pos++)
        {
            for(int c = 0; c < channels; c++, ptr++)
            {
                card.at<Vec3f>(*ptr)[c] += 1;
            }
        }
    }
    // NOTE(review): divides by zero for LDR values never observed in the
    // input — verify downstream tolerates the resulting inf entries.
    card = 1.0 / card;

    Ptr<MergeRobertson> merge = createMergeRobertson();
    for(int iter = 0; iter < max_iter; iter++)
    {
        // 1) merge with the current response estimate
        radiance = Mat::zeros(images[0].size(), CV_32FCC);
        merge->process(images, radiance, times, response);

        // 2) re-estimate: average exposure-scaled radiance per LDR value
        Mat new_response = Mat::zeros(LDR_SIZE, 1, CV_32FC3);
        for(size_t i = 0; i < images.size(); i++)
        {
            uchar *ptr = images[i].ptr();
            float* rad_ptr = radiance.ptr<float>();
            for(size_t pos = 0; pos < images[i].total(); pos++)
            {
                for(int c = 0; c < channels; c++, ptr++, rad_ptr++)
                {
                    new_response.at<Vec3f>(*ptr)[c] += times.at<float>((int)i) * *rad_ptr;
                }
            }
        }
        new_response = new_response.mul(card);
        // normalize each channel so the curve's middle entry equals 1
        for(int c = 0; c < 3; c++)
        {
            float middle = new_response.at<Vec3f>(LDR_SIZE / 2)[c];
            for(int i = 0; i < LDR_SIZE; i++)
            {
                new_response.at<Vec3f>(i)[c] /= middle;
            }
        }
        // 3) convergence test on the L1 change of the curve
        float diff = static_cast<float>(sum(sum(abs(new_response - response)))[0] / channels);
        new_response.copyTo(response);
        if(diff < threshold)
        {
            break;
        }
    }
}
void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputArray _dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hForColorComponents, int templateWindowSize, int searchWindowSize) { std::vector<Mat> srcImgs; _srcImgs.getMatVector(srcImgs); fastNlMeansDenoisingMultiCheckPreconditions( srcImgs, imgToDenoiseIndex, temporalWindowSize, templateWindowSize, searchWindowSize); _dst.create(srcImgs[0].size(), srcImgs[0].type()); Mat dst = _dst.getMat(); int src_imgs_size = static_cast<int>(srcImgs.size()); if (srcImgs[0].type() != CV_8UC3) { CV_Error(Error::StsBadArg, "Type of input images should be CV_8UC3!"); return; } int from_to[] = { 0,0, 1,1, 2,2 }; // TODO convert only required images std::vector<Mat> src_lab(src_imgs_size); std::vector<Mat> l(src_imgs_size); std::vector<Mat> ab(src_imgs_size); for (int i = 0; i < src_imgs_size; i++) { src_lab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC3); l[i] = Mat::zeros(srcImgs[0].size(), CV_8UC1); ab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC2); cvtColor(srcImgs[i], src_lab[i], COLOR_LBGR2Lab); Mat l_ab[] = { l[i], ab[i] }; mixChannels(&src_lab[i], 1, l_ab, 2, from_to, 3); } Mat dst_l; Mat dst_ab; fastNlMeansDenoisingMulti( l, dst_l, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize, searchWindowSize); fastNlMeansDenoisingMulti( ab, dst_ab, imgToDenoiseIndex, temporalWindowSize, hForColorComponents, templateWindowSize, searchWindowSize); Mat l_ab_denoised[] = { dst_l, dst_ab }; Mat dst_lab(srcImgs[0].size(), srcImgs[0].type()); mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3); cvtColor(dst_lab, dst, COLOR_Lab2LBGR); }
// Reconstruction function for API void reconstruct(const InputArrayOfArrays points2d, OutputArrayOfArrays projection_matrices, OutputArray points3d, bool is_projective, bool has_outliers, bool is_sequence) { int nviews = points2d.total(); cv::Mat F; // OpenCV data types std::vector<cv::Mat> pts2d; points2d.getMatVector(pts2d); int depth = pts2d[0].depth(); // Projective reconstruction if (is_projective) { // Two view reconstruction if (nviews == 2) { // Get fundamental matrix fundamental8Point(pts2d[0], pts2d[1], F, has_outliers); // Get Projection matrices cv::Mat P, Pp; projectionsFromFundamental(F, P, Pp); projection_matrices.create(2, 1, depth); P.copyTo(projection_matrices.getMatRef(0)); Pp.copyTo(projection_matrices.getMatRef(1)); // Triangulate and find 3D points using inliers triangulatePoints(points2d, projection_matrices, points3d); } } // Affine reconstruction else { // Two view reconstruction if (nviews == 2) { } else { } } }
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
    // Batch form: compute one descriptor Mat per (image, keypoint set) pair.
    std::vector<Mat> imageCollection;
    _imageCollection.getMatVector(imageCollection);
    CV_Assert( imageCollection.size() == pointCollection.size() );

    // BUG FIX: the previous code read the OUTPUT into a local copy via
    // getMatVector() and computed into that copy, so freshly allocated
    // descriptor Mats were never propagated back to the caller. Write
    // through the caller's vector<Mat> instead (same getObj() pattern as
    // Feature2D::compute).
    CV_Assert( _descCollection.kind() == _InputArray::STD_VECTOR_MAT );
    std::vector<Mat>& descCollection = *(std::vector<Mat>*)_descCollection.getObj();
    descCollection.resize( imageCollection.size() );

    for( size_t i = 0; i < imageCollection.size(); i++ )
        compute( imageCollection[i], pointCollection[i], descCollection[i] );
}
void descriptorExtractor::extract(InputArrayOfArrays inputimg, OutputArray feature, String feature_blob)
{
    // Compute a CNN feature (from layer `feature_blob`) for a single Mat or
    // for each Mat in a vector; each row of `feature` is one image's feature.
    if (net_ready)
    {
        Blob<float>* input_layer = convnet->input_blobs()[0];
        input_layer->Reshape(1, num_channels, input_geometry.height, input_geometry.width);
        /* Forward dimension change to all layers. */
        convnet->Reshape();
        std::vector<cv::Mat> input_channels;
        wrapInput(&input_channels);
        // FIX: use the named kind constant instead of the magic number 65536
        // (_InputArray::MAT is the kind tag for a single cv::Mat).
        if (inputimg.kind() == _InputArray::MAT)
        {/* this is a Mat */
            Mat img = inputimg.getMat();
            preprocess(img, &input_channels);
            convnet->ForwardPrefilled();
            /* Copy the output layer to a std::vector */
            Blob<float>* output_layer = convnet->blob_by_name(feature_blob).get();
            const float* begin = output_layer->cpu_data();
            const float* end = begin + output_layer->channels();
            std::vector<float> featureVec = std::vector<float>(begin, end);
            cv::Mat feature_mat = cv::Mat(featureVec, true).t();
            feature_mat.copyTo(feature);
        }
        else
        {/* This is a vector<Mat> */
            vector<Mat> img;
            inputimg.getMatVector(img);
            Mat feature_vector;
            for (unsigned int i = 0; i < img.size(); ++i)
            {
                preprocess(img[i], &input_channels);
                convnet->ForwardPrefilled();
                /* Copy the output layer to a std::vector */
                Blob<float>* output_layer = convnet->blob_by_name(feature_blob).get();
                const float* begin = output_layer->cpu_data();
                const float* end = begin + output_layer->channels();
                std::vector<float> featureVec = std::vector<float>(begin, end);
                if (i == 0)
                {
                    // first feature reveals the dimensionality; size the full
                    // result matrix (one row per image) accordingly
                    feature_vector = cv::Mat(featureVec, true).t();
                    int dim_feature = feature_vector.cols;
                    feature_vector.resize(img.size(), dim_feature);
                }
                feature_vector.row(i) = cv::Mat(featureVec, true).t();
            }
            feature_vector.copyTo(feature);
        }
    }
    else
        std::cout << "Device must be set properly using constructor and the net must be set in advance using loadNet.";
};
void Feature2D::detect( InputArrayOfArrays _images, std::vector<std::vector<KeyPoint> >& keypoints, InputArrayOfArrays _masks )
{
    CV_INSTRUMENT_REGION()

    // Batch form: run single-image detect() once per input image, with an
    // optional per-image mask.
    std::vector<Mat> imgs;
    _images.getMatVector(imgs);
    const size_t count = imgs.size();

    std::vector<Mat> maskVec;
    if( !_masks.empty() )
    {
        _masks.getMatVector(maskVec);
        // one mask per image is required when masks are supplied
        CV_Assert(maskVec.size() == count);
    }

    keypoints.resize(count);
    for( size_t idx = 0; idx < count; idx++ )
        detect(imgs[idx], keypoints[idx], maskVec.empty() ? Mat() : maskVec[idx]);
}
void ICFFeatureEvaluatorImpl::setChannels(InputArrayOfArrays channels)
{
    // Replace the stored channels with integer integral images of the inputs
    // (ICF features are then evaluated as box sums over these integrals).
    channels_.clear();
    vector<Mat> ch;
    channels.getMatVector(ch);
    for( size_t i = 0; i < ch.size(); ++i )
    {
        const Mat &channel = ch[i];
        Mat integral_channel;
        integral(channel, integral_channel, CV_32F);
        integral_channel.convertTo(integral_channel, CV_32S);
        // PERF FIX: convertTo already produced a fresh buffer owned solely by
        // integral_channel; the previous clone() duplicated the entire
        // integral image for nothing. push_back shares the header safely.
        channels_.push_back(integral_channel);
    }
}
void process(InputArrayOfArrays _src, std::vector<Mat>& dst)
{
    // Align a bracketed exposure stack: the middle image is the reference,
    // every other image is shifted to match it, and (if `cut` is set) all
    // results are cropped to the common overlapping region.
    std::vector<Mat> src;
    _src.getMatVector(src);

    checkImageDimensions(src);
    dst.resize(src.size());

    size_t pivot = src.size() / 2;
    dst[pivot] = src[pivot];   // reference passes through (shares data)
    Mat gray_base;
    cvtColor(src[pivot], gray_base, COLOR_RGB2GRAY);
    std::vector<Point> shifts;

    for(size_t i = 0; i < src.size(); i++)
    {
        if(i == pivot)
        {
            shifts.push_back(Point(0, 0));
            continue;
        }
        Mat gray;
        cvtColor(src[i], gray, COLOR_RGB2GRAY);
        Point shift = calculateShift(gray_base, gray);
        shifts.push_back(shift);
        shiftMat(src[i], dst[i], shift);
    }
    if(cut)
    {
        // compute the overlap window: top-left = largest positive shift,
        // bottom-right = image size offset by the most negative shift
        Point max(0, 0), min(0, 0);
        for(size_t i = 0; i < shifts.size(); i++)
        {
            if(shifts[i].x > max.x)
            {
                max.x = shifts[i].x;
            }
            if(shifts[i].y > max.y)
            {
                max.y = shifts[i].y;
            }
            if(shifts[i].x < min.x)
            {
                min.x = shifts[i].x;
            }
            if(shifts[i].y < min.y)
            {
                min.y = shifts[i].y;
            }
        }
        Point size = dst[0].size();   // Size -> Point conversion (width, height)
        for(size_t i = 0; i < dst.size(); i++)
        {
            // Rect(tl, br): crop every aligned image to the common region
            dst[i] = dst[i](Rect(max, min + size));
        }
    }
}
void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData)
{
    // Train (or, with preserveData == true, incrementally extend) the LBPH
    // model: one spatial LBP histogram per sample, plus its integer label.
    if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR)
    {
        String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
        CV_Error(Error::StsBadArg, error_message);
    }
    if(_in_src.total() == 0)
    {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsUnsupportedFormat, error_message);
    }
    else if(_in_labels.getMat().type() != CV_32SC1)
    {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
        CV_Error(Error::StsUnsupportedFormat, error_message);
    }
    // get the vector of matrices
    std::vector<Mat> src;
    _in_src.getMatVector(src);
    // get the label matrix
    Mat labels = _in_labels.getMat();
    // check if data is well-aligned
    if(labels.total() != src.size())
    {
        // BUG FIX: the format string previously contained a raw line break
        // (invalid string literal), and size_t values were passed for %d,
        // which is undefined behavior — cast them to int explicitly.
        String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", (int)src.size(), (int)_labels.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // if this model should be trained without preserving old data, delete old model data
    if(!preserveData)
    {
        _labels.release();
        _histograms.clear();
    }
    // append labels to _labels matrix
    for(size_t labelIdx = 0; labelIdx < labels.total(); labelIdx++)
    {
        _labels.push_back(labels.at<int>((int)labelIdx));
    }
    // store the spatial histograms of the original data
    for(size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++)
    {
        // calculate lbp image
        Mat lbp_image = elbp(src[sampleIdx], _radius, _neighbors);
        // get spatial histogram from this lbp image
        Mat p = spatial_histogram(
                lbp_image, /* lbp_image */
                static_cast<int>(std::pow(2.0, static_cast<double>(_neighbors))), /* number of possible patterns */
                _grid_x, /* grid size x */
                _grid_y, /* grid size y */
                true);
        // add to templates
        _histograms.push_back(p);
    }
}
void GaborLbp_Algorithm::train(InputArrayOfArrays _in_src, InputArray _in_labels)
{
    // Train the Gabor+LBP model: validate inputs, then extract one Gabor
    // feature vector per sample and append it (with its label) to the model.
    if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR)
    {
        string error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
        CV_Error(CV_StsBadArg, error_message);
    }
    if(_in_src.total() == 0)
    {
        string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(CV_StsUnsupportedFormat, error_message);
    }
    else if(_in_labels.getMat().type() != CV_32SC1)
    {
        string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
        CV_Error(CV_StsUnsupportedFormat, error_message);
    }
    vector<Mat> src;
    _in_src.getMatVector(src);
    Mat labels = _in_labels.getMat();
    if(labels.total() != src.size())
    {
        // FIX: cast size_t to int — passing size_t through "%d" is undefined behavior
        string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", (int)src.size(), (int)m_labels.total());
        CV_Error(CV_StsBadArg, error_message);
    }
    // (removed unused locals i, j, m_kmax, m_f, m_sigma — none were referenced)
    for (size_t sampleIdx = 0; sampleIdx < src.size(); sampleIdx++)
    {
        int row = src[sampleIdx].rows;
        int col = src[sampleIdx].cols;
        if (row <= 0 || col <= 0)
        {
            // skip empty samples rather than feeding them to the Gabor filter
            continue;
        }
        m_labels.push_back(labels.at<int>((int)sampleIdx));
        ZGabor m_gabor;
        m_gabor.InitGabor();
        m_gabor.GetFeature(src[sampleIdx], 1, 8, 8, 8);
        cout << sampleIdx << endl;   // progress trace (kept: observable behavior)
        m_projection.push_back(m_gabor.m_eigenvector);
    }
}
void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize)
{
    // Multi-frame non-local-means: denoise frame `imgToDenoiseIndex` using
    // `temporalWindowSize` neighboring frames. Dispatches a row-parallel
    // invoker templated on the pixel type; only 1-3 uchar channels supported.
    std::vector<Mat> srcImgs;
    _srcImgs.getMatVector(srcImgs);

    fastNlMeansDenoisingMultiCheckPreconditions(
        srcImgs, imgToDenoiseIndex, temporalWindowSize, templateWindowSize, searchWindowSize);

    _dst.create(srcImgs[0].size(), srcImgs[0].type());
    Mat dst = _dst.getMat();

    switch (srcImgs[0].type())
    {
        case CV_8U:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<uchar>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC2:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<cv::Vec2b>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC3:
            parallel_for_(cv::Range(0, srcImgs[0].rows),
                FastNlMeansMultiDenoisingInvoker<cv::Vec3b>(
                    srcImgs, imgToDenoiseIndex, temporalWindowSize,
                    dst, templateWindowSize, searchWindowSize, h));
            break;
        default:
            CV_Error(Error::StsBadArg, "Unsupported matrix format! Only uchar, Vec2b, Vec3b are supported");
    }
}
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
{
    // Robertson HDR merge: radiance = sum(t_i * w_i * f(I_i)) / sum(t_i^2 * w_i)
    // where f is the (optionally supplied) camera response and w the weight LUT.
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(images[0].size(), CV_32FCC);
    Mat result = dst.getMat();

    // fall back to a normalized linear response when none is supplied
    Mat response = input_response.getMat();
    if(response.empty())
    {
        float middle = LDR_SIZE / 2.0f;
        response = linearResponse(channels) / middle;
    }
    CV_Assert(response.rows == LDR_SIZE && response.cols == 1 && response.channels() == channels);

    result = Mat::zeros(images[0].size(), CV_32FCC);
    Mat wsum = Mat::zeros(images[0].size(), CV_32FCC);
    for(size_t i = 0; i < images.size(); i++)
    {
        Mat im, w;
        LUT(images[i], weight, w);       // per-pixel weights
        LUT(images[i], response, im);    // per-pixel response values

        result += times.at<float>((int)i) * w.mul(im);
        wsum += times.at<float>((int)i) * times.at<float>((int)i) * w;
    }
    // NOTE(review): pixels whose weight sum is 0 divide by zero here —
    // confirm the weight LUT can never zero out across all exposures.
    result = result.mul(1 / wsum);
}
void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
{
    // InputArrayOfArrays adapter: unpack the array-of-arrays into a plain
    // vector and forward to the pointer-based merge overload. An empty input
    // forwards a null pointer with count 0.
    std::vector<Mat> planes;
    _mv.getMatVector(planes);
    merge(planes.empty() ? 0 : &planes[0], planes.size(), _dst);
}
void FisherFaceRecognizer::train(InputArrayOfArrays _in_src, InputArray _inm_labels, bool preserveData) { if (_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) { String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >)."; CV_Error(CV_StsBadArg, error_message); } if (_in_src.total() == 0) { String error_message = format("Empty training data was given. You'll need more than one sample to learn a model."); CV_Error(CV_StsUnsupportedFormat, error_message); } else if (_inm_labels.getMat().type() != CV_32SC1) { String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _inm_labels.type()); CV_Error(CV_StsUnsupportedFormat, error_message); } // get the vector of matrices std::vector<Mat> src; _in_src.getMatVector(src); // get the label matrix Mat labels = _inm_labels.getMat(); // check if data is well- aligned if (labels.total() != src.size()) { String error_message = format("The number of samples (src) must equal the number of labels (labels). 
Was len(samples)=%d, len(labels)=%d.", src.size(), m_labels.total()); CV_Error(CV_StsBadArg, error_message); } // if this model should be trained without preserving old data, delete old model data if (!preserveData) { m_labels.release(); m_src.clear(); } // append labels to m_labels matrix for (size_t labelIdx = 0; labelIdx < labels.total(); labelIdx++) { m_labels.push_back(labels.at<int>((int)labelIdx)); m_src.push_back(src[(int)labelIdx]); } // observations in row Mat data = asRowMatrix(m_src, CV_64FC1); // number of samples int n = data.rows; /* LDA needs more than one class We have to check the labels first */ bool label_flag = false; for (int i = 1 ; i < m_labels.rows ; i++) { if (m_labels.at<int>(i, 0)!=m_labels.at<int>(i-1, 0)) { label_flag = true; break; } } if (!label_flag) { String error_message = format("The labels should contain more than one types."); CV_Error(CV_StsBadArg, error_message); } // clear existing model data m_projections.clear(); std::vector<int> ll; for (unsigned int i = 0 ; i < m_labels.total() ; i++) { ll.push_back(m_labels.at<int>(i)); } // get the number of unique classes int C = (int) remove_dups(ll).size(); // clip number of components to be valid m_num_components = (C-1); // perform the PCA PCA pca(data, Mat(), PCA::DATA_AS_ROW, (n-C)); LDA lda(pca.project(data),m_labels, m_num_components); // Now calculate the projection matrix as pca.eigenvectors * lda.eigenvectors. // Note: OpenCV stores the eigenvectors by row, so we need to transpose it! gemm(pca.eigenvectors, lda.eigenvectors(), 1.0, Mat(), 0.0, m_eigenvectors, GEMM_1_T); // store the projections of the original data for (int sampleIdx = 0 ; sampleIdx < data.rows ; sampleIdx++) { Mat p = LDA::subspaceProject(m_eigenvectors, m_mean, data.row(sampleIdx)); m_projections.push_back(p); } }
void triangulatePoints(InputArrayOfArrays _points2d, InputArrayOfArrays _projection_matrices, OutputArray _points3d)
{
    // Triangulate 3D points from N >= 2 views: DLT for two views, N-view
    // triangulation otherwise. Inputs: one 2xM point matrix and one 3x4
    // projection matrix per view; output: 3xM CV_64F matrix of 3D points.

    // check
    size_t nviews = (unsigned) _points2d.total();
    CV_Assert(nviews >= 2 && nviews == _projection_matrices.total());

    // inputs
    size_t n_points;
    vector<Mat_<double> > points2d(nviews);
    vector<Matx34d> projection_matrices(nviews);
    {
        vector<Mat> points2d_tmp;
        _points2d.getMatVector(points2d_tmp);
        n_points = points2d_tmp[0].cols;

        vector<Mat> projection_matrices_tmp;
        _projection_matrices.getMatVector(projection_matrices_tmp);

        // Make sure the dimensions are right; convert everything to CV_64F
        for(size_t i=0; i<nviews; ++i)
        {
            // NOTE(review): int (cols) vs size_t (n_points) comparison —
            // sign-compare warning, harmless for realistic sizes
            CV_Assert(points2d_tmp[i].rows == 2 && points2d_tmp[i].cols == n_points);
            if (points2d_tmp[i].type() == CV_64F)
                points2d[i] = points2d_tmp[i];
            else
                points2d_tmp[i].convertTo(points2d[i], CV_64F);

            CV_Assert(projection_matrices_tmp[i].rows == 3 && projection_matrices_tmp[i].cols == 4);
            if (projection_matrices_tmp[i].type() == CV_64F)
                projection_matrices[i] = projection_matrices_tmp[i];
            else
                projection_matrices_tmp[i].convertTo(projection_matrices[i], CV_64F);
        }
    }

    // output
    _points3d.create(3, n_points, CV_64F);
    cv::Mat points3d = _points3d.getMat();

    // Two view
    if( nviews == 2 )
    {
        const Mat_<double> &xl = points2d[0], &xr = points2d[1];

        const Matx34d & Pl = projection_matrices[0];    // left matrix projection
        const Matx34d & Pr = projection_matrices[1];    // right matrix projection

        // triangulate each correspondence pair with the DLT
        for( unsigned i = 0; i < n_points; ++i )
        {
            Vec3d point3d;
            triangulateDLT( Vec2d(xl(0,i), xl(1,i)), Vec2d(xr(0,i), xr(1,i)), Pl, Pr, point3d );
            for(char j=0; j<3; ++j)
                points3d.at<double>(j, i) = point3d[j];
        }
    }
    else if( nviews > 2 )
    {
        // triangulate
        for( unsigned i=0; i < n_points; ++i )
        {
            // build x matrix (one point per view)
            Mat_<double> x( 2, nviews );
            for( unsigned k=0; k < nviews; ++k )
            {
                points2d.at(k).col(i).copyTo( x.col(k) );
            }

            Vec3d point3d;
            nViewTriangulate( x, projection_matrices, point3d );
            for(char j=0; j<3; ++j)
                points3d.at<double>(j, i) = point3d[j];
        }
    }
}
// Reconstruction function for API void reconstruct(InputArrayOfArrays points2d, OutputArray Ps, OutputArray points3d, InputOutputArray K, bool is_projective) { const int nviews = points2d.total(); CV_Assert( nviews >= 2 ); // OpenCV data types std::vector<Mat> pts2d; points2d.getMatVector(pts2d); const int depth = pts2d[0].depth(); Matx33d Ka = K.getMat(); // Projective reconstruction if (is_projective) { if ( nviews == 2 ) { // Get Projection matrices Matx33d F; Matx34d P, Pp; normalizedEightPointSolver(pts2d[0], pts2d[1], F); projectionsFromFundamental(F, P, Pp); Ps.create(2, 1, depth); Mat(P).copyTo(Ps.getMatRef(0)); Mat(Pp).copyTo(Ps.getMatRef(1)); // Triangulate and find 3D points using inliers triangulatePoints(points2d, Ps, points3d); } else { std::vector<Mat> Rs, Ts; reconstruct(points2d, Rs, Ts, Ka, points3d, is_projective); // From Rs and Ts, extract Ps const int nviews = Rs.size(); Ps.create(nviews, 1, depth); Matx34d P; for (size_t i = 0; i < nviews; ++i) { projectionFromKRt(Ka, Rs[i], Vec3d(Ts[i]), P); Mat(P).copyTo(Ps.getMatRef(i)); } Mat(Ka).copyTo(K.getMat()); } } // Affine reconstruction else { // TODO: implement me } }
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
{
    // Debevec HDR merge: per-pixel weighted average of log-radiance
    // (log f(I_i) - log t_i) across exposures, then exponentiated.
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    Size size = images[0].size();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(images[0].size(), CV_32FCC);
    Mat result = dst.getMat();

    Mat response = input_response.getMat();
    if(response.empty())
    {
        response = linearResponse(channels);
        // avoid log(0) at intensity 0 by copying the first nonzero entry
        response.at<Vec3f>(0) = response.at<Vec3f>(1);
    }

    Mat log_response;
    log(response, log_response);
    CV_Assert(log_response.rows == LDR_SIZE && log_response.cols == 1 && log_response.channels() == channels);

    // BUG FIX: Mat exp_values(times) only copied the Mat header, so the
    // in-place log() below clobbered the caller's exposure-times array.
    // Clone to get an independent buffer first.
    Mat exp_values(times.clone());
    log(exp_values, exp_values);

    result = Mat::zeros(size, CV_32FCC);
    std::vector<Mat> result_split;
    split(result, result_split);
    Mat weight_sum = Mat::zeros(size, CV_32F);

    for(size_t i = 0; i < images.size(); i++)
    {
        std::vector<Mat> splitted;
        split(images[i], splitted);

        // per-pixel weight: average of the per-channel weight LUT
        Mat w = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++)
        {
            LUT(splitted[c], weights, splitted[c]);
            w += splitted[c];
        }
        w /= channels;

        Mat response_img;
        LUT(images[i], log_response, response_img);
        split(response_img, splitted);
        for(int c = 0; c < channels; c++)
        {
            // accumulate weighted (log f(I) - log t_i)
            result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
        }
        weight_sum += w;
    }
    weight_sum = 1.0f / weight_sum;
    for(int c = 0; c < channels; c++)
    {
        result_split[c] = result_split[c].mul(weight_sum);
    }
    merge(result_split, result);
    exp(result, result);
}
void process(InputArrayOfArrays src, OutputArray dst)
{
    // Mertens exposure fusion: per-image weights from contrast, saturation
    // and well-exposedness, blended through a Laplacian/Gaussian pyramid.
    std::vector<Mat> images;
    src.getMatVector(images);
    checkImageDimensions(images);

    int channels = images[0].channels();
    CV_Assert(channels == 1 || channels == 3);
    Size size = images[0].size();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    std::vector<Mat> weights(images.size());
    Mat weight_sum = Mat::zeros(size, CV_32F);

    for(size_t i = 0; i < images.size(); i++)
    {
        Mat img, gray, contrast, saturation, wellexp;
        std::vector<Mat> splitted(channels);

        images[i].convertTo(img, CV_32F, 1.0f/255.0f);
        if(channels == 3)
        {
            cvtColor(img, gray, COLOR_RGB2GRAY);
        }
        else
        {
            img.copyTo(gray);
        }
        split(img, splitted);

        // contrast measure: |Laplacian| of the luminance
        Laplacian(gray, contrast, CV_32F);
        contrast = abs(contrast);

        // saturation measure: per-pixel stddev across channels
        Mat mean = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++)
        {
            mean += splitted[c];
        }
        mean /= channels;

        saturation = Mat::zeros(size, CV_32F);
        for(int c = 0; c < channels; c++)
        {
            Mat deviation = splitted[c] - mean;
            pow(deviation, 2.0f, deviation);
            saturation += deviation;
        }
        sqrt(saturation, saturation);

        // well-exposedness: product of Gaussians centered at 0.5
        wellexp = Mat::ones(size, CV_32F);
        for(int c = 0; c < channels; c++)
        {
            Mat expo = splitted[c] - 0.5f;
            pow(expo, 2.0f, expo);
            expo = -expo / 0.08f;
            exp(expo, expo);
            wellexp = wellexp.mul(expo);
        }

        // combine the three measures with their configured exponents
        pow(contrast, wcon, contrast);
        pow(saturation, wsat, saturation);
        pow(wellexp, wexp, wellexp);

        weights[i] = contrast;
        if(channels == 3)
        {
            weights[i] = weights[i].mul(saturation);
        }
        weights[i] = weights[i].mul(wellexp) + 1e-12f;   // epsilon avoids 0/0
        weight_sum += weights[i];
    }

    int maxlevel = static_cast<int>(logf(static_cast<float>(min(size.width, size.height))) / logf(2.0f));
    std::vector<Mat> res_pyr(maxlevel + 1);

    for(size_t i = 0; i < images.size(); i++)
    {
        weights[i] /= weight_sum;   // normalize weights across images
        Mat img;
        images[i].convertTo(img, CV_32F, 1.0f/255.0f);

        std::vector<Mat> img_pyr, weight_pyr;
        buildPyramid(img, img_pyr, maxlevel);
        buildPyramid(weights[i], weight_pyr, maxlevel);

        // turn the Gaussian image pyramid into a Laplacian pyramid
        for(int lvl = 0; lvl < maxlevel; lvl++)
        {
            Mat up;
            pyrUp(img_pyr[lvl + 1], up, img_pyr[lvl].size());
            img_pyr[lvl] -= up;
        }
        // accumulate the weighted Laplacian pyramid into the result pyramid
        for(int lvl = 0; lvl <= maxlevel; lvl++)
        {
            std::vector<Mat> splitted(channels);
            split(img_pyr[lvl], splitted);
            for(int c = 0; c < channels; c++)
            {
                splitted[c] = splitted[c].mul(weight_pyr[lvl]);
            }
            merge(splitted, img_pyr[lvl]);
            if(res_pyr[lvl].empty())
            {
                res_pyr[lvl] = img_pyr[lvl];
            }
            else
            {
                res_pyr[lvl] += img_pyr[lvl];
            }
        }
    }
    // collapse the result pyramid coarse-to-fine
    for(int lvl = maxlevel; lvl > 0; lvl--)
    {
        Mat up;
        pyrUp(res_pyr[lvl], up, res_pyr[lvl - 1].size());
        res_pyr[lvl - 1] += up;
    }
    dst.create(size, CV_32FCC);
    res_pyr[0].copyTo(dst.getMat());
}
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
{
    CV_INSTRUMENT_REGION()

    // Debevec & Malik response calibration: sample pixel locations, then per
    // channel build a linear system (data terms + mid-curve anchor +
    // smoothness prior) and solve it by SVD; output is a LDR_SIZE x 1 curve.

    // check inputs
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();
    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);
    CV_Assert(times.type() == CV_32FC1);

    // create output
    int channels = images[0].channels();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);
    int rows = images[0].rows;
    int cols = images[0].cols;
    dst.create(LDR_SIZE, 1, CV_32FCC);
    Mat result = dst.getMat();

    // pick pixel locations (either random or in a rectangular grid)
    std::vector<Point> points;
    points.reserve(samples);
    if(random)
    {
        for(int i = 0; i < samples; i++)
        {
            points.push_back(Point(rand() % cols, rand() % rows));
        }
    }
    else
    {
        // grid with roughly `samples` cells, matching the image aspect ratio;
        // sample at cell centers
        int x_points = static_cast<int>(sqrt(static_cast<double>(samples) * cols / rows));
        CV_Assert(0 < x_points && x_points <= cols);
        int y_points = samples / x_points;
        CV_Assert(0 < y_points && y_points <= rows);
        int step_x = cols / x_points;
        int step_y = rows / y_points;
        for(int i = 0, x = step_x / 2; i < x_points; i++, x += step_x)
        {
            for(int j = 0, y = step_y / 2; j < y_points; j++, y += step_y)
            {
                if( 0 <= x && x < cols && 0 <= y && y < rows )
                {
                    points.push_back(Point(x, y));
                }
            }
        }
        // we can have slightly less grid points than specified
        //samples = static_cast<int>(points.size());
    }

    // we need enough equations to ensure a sufficiently overdetermined system
    // (maybe only as a warning)
    //CV_Assert(points.size() * (images.size() - 1) >= LDR_SIZE);

    // solve for imaging system response function, over each channel separately
    std::vector<Mat> result_split(channels);
    for(int ch = 0; ch < channels; ch++)
    {
        // initialize system of linear equations
        Mat A = Mat::zeros((int)points.size() * (int)images.size() + LDR_SIZE + 1, LDR_SIZE + (int)points.size(), CV_32F);
        Mat B = Mat::zeros(A.rows, 1, CV_32F);

        // include the data-fitting equations
        int k = 0;
        for(size_t i = 0; i < points.size(); i++)
        {
            for(size_t j = 0; j < images.size(); j++)
            {
                // val = images[j].at<Vec3b>(points[i].y, points[i].x)[ch]
                int val = images[j].ptr()[channels*(points[i].y * cols + points[i].x) + ch];
                float wij = w.at<float>(val);
                A.at<float>(k, val) = wij;
                A.at<float>(k, LDR_SIZE + (int)i) = -wij;
                B.at<float>(k, 0) = wij * log(times.at<float>((int)j));
                k++;
            }
        }

        // fix the curve by setting its middle value to 0
        A.at<float>(k, LDR_SIZE / 2) = 1;
        k++;

        // include the smoothness equations (weighted second differences)
        for(int i = 0; i < (LDR_SIZE - 2); i++)
        {
            float wi = w.at<float>(i + 1);
            A.at<float>(k, i) = lambda * wi;
            A.at<float>(k, i + 1) = -2 * lambda * wi;
            A.at<float>(k, i + 2) = lambda * wi;
            k++;
        }

        // solve the overdetermined system using SVD (least-squares problem)
        Mat solution;
        solve(A, B, solution, DECOMP_SVD);
        solution.rowRange(0, LDR_SIZE).copyTo(result_split[ch]);
    }

    // combine log-exposures and take its exponent
    merge(result_split, result);
    exp(result, result);
}
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
{
    // Debevec & Malik response calibration: sample pixel locations, build an
    // overdetermined system (data terms + mid-curve anchor + smoothness
    // prior) per channel and solve via SVD; output is a LDR_SIZE x 1 curve.
    std::vector<Mat> images;
    src.getMatVector(images);
    Mat times = _times.getMat();

    CV_Assert(images.size() == times.total());
    checkImageDimensions(images);
    CV_Assert(images[0].depth() == CV_8U);

    int channels = images[0].channels();
    int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

    dst.create(LDR_SIZE, 1, CV_32FCC);
    Mat result = dst.getMat();

    std::vector<Point> sample_points;
    if(random)
    {
        for(int i = 0; i < samples; i++)
        {
            sample_points.push_back(Point(rand() % images[0].cols, rand() % images[0].rows));
        }
    }
    else
    {
        int x_points = static_cast<int>(sqrt(static_cast<double>(samples) * images[0].cols / images[0].rows));
        int y_points = samples / x_points;
        int step_x = images[0].cols / x_points;
        int step_y = images[0].rows / y_points;
        // BUG FIX: start y at step_y / 2 (cell center) to match the x axis;
        // starting at step_y skewed the grid and let the last row fall
        // outside the image.
        for(int i = 0, x = step_x / 2; i < x_points; i++, x += step_x)
        {
            for(int j = 0, y = step_y / 2; j < y_points; j++, y += step_y)
            {
                sample_points.push_back(Point(x, y));
            }
        }
    }

    std::vector<Mat> result_split(channels);
    for(int channel = 0; channel < channels; channel++)
    {
        Mat A = Mat::zeros((int)sample_points.size() * (int)images.size() + LDR_SIZE + 1,
                           LDR_SIZE + (int)sample_points.size(), CV_32F);
        Mat B = Mat::zeros(A.rows, 1, CV_32F);

        // data-fitting equations
        int eq = 0;
        for(size_t i = 0; i < sample_points.size(); i++)
        {
            for(size_t j = 0; j < images.size(); j++)
            {
                // BUG FIX: pixel stride is `channels`, not a hard-coded 3, and
                // the x coordinate must come from sample_points[i] — `j`
                // indexes images, so sample_points[j] read the wrong point and
                // could run past the end of the vector.
                int val = images[j].ptr()[channels*(sample_points[i].y * images[j].cols + sample_points[i].x) + channel];
                A.at<float>(eq, val) = w.at<float>(val);
                A.at<float>(eq, LDR_SIZE + (int)i) = -w.at<float>(val);
                B.at<float>(eq, 0) = w.at<float>(val) * log(times.at<float>((int)j));
                eq++;
            }
        }

        // anchor the middle of the curve to resolve the gauge freedom
        A.at<float>(eq, LDR_SIZE / 2) = 1;
        eq++;

        // smoothness equations: LDR_SIZE - 2 weighted second differences
        // (replaces the magic constant 254)
        for(int i = 0; i < LDR_SIZE - 2; i++)
        {
            A.at<float>(eq, i) = lambda * w.at<float>(i + 1);
            A.at<float>(eq, i + 1) = -2 * lambda * w.at<float>(i + 1);
            A.at<float>(eq, i + 2) = lambda * w.at<float>(i + 1);
            eq++;
        }

        // least-squares solve of the overdetermined system
        Mat solution;
        solve(A, B, solution, DECOMP_SVD);
        solution.rowRange(0, LDR_SIZE).copyTo(result_split[channel]);
    }
    merge(result_split, result);
    exp(result, result);
}