/**
 * Method to register two images using the simple pairwise algorithm
 * without allocating memory.
 * @param srcIm               Source image
 * @param dstIm               Destination image
 * @param pTransform          Output transform (preallocated by the caller)
 * @param pCovar              Not used
 * @param pInterframePoorFlag Set to true when too few features are detected
 * @return void
 */
void InterframeRegister::Register(Mat srcIm, Mat dstIm, Mat &pTransform,
                                  Mat pCovar, bool *pInterframePoorFlag)
{
    /* init transform to identity */
    setIdentity(pTransform);

    /* image params */
    const Size szCurr = srcIm.size();

    /* registration parameters */
    double bound[2][2]     = {0.0};
    double transform[3][3] = {0.0};
    vector<Point2f> ptSrc(InterframeRegister::MAX_KLT_POINTS);

    /* contrast stretch */
    Mat srcStretch = srcIm.clone();
    Mat dstStretch = dstIm.clone();
    cvutStdDevStretch(srcStretch, srcStretch);
    cvutStdDevStretch(dstStretch, dstStretch);

    /* KLT only -- no switching to intensity-based registration */
    int cornerCount = InterframeRegister::InitFeatures(srcStretch, ptSrc);
    vector<Point2f> ptDst(cornerCount);

    /* sanity check: are there enough features? */
    if (cornerCount < InterframeRegister::INLIER_THRESH)
    {
        if (pInterframePoorFlag != NULL)
            *pInterframePoorFlag = true;
        return;
    }

    /* find matching points */
    cornerCount = TrackFeatures(srcStretch, dstStretch, ptSrc, ptDst, cornerCount);

    /* robust estimation of the motion parameters */
    vector<Point2d> optInliersL(cornerCount);   /* dst */
    vector<Point2d> optInliersR(cornerCount);   /* src */
    pTransform = RunRansac(ptDst, ptSrc, cornerCount, optInliersL, optInliersR,
                           srcIm, dstIm);
}
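/* A minimal usage sketch for the pairwise registration above (hedged: it assumes
 * 8-bit single-channel frames, a default-constructible InterframeRegister and the
 * pointer-based poor-registration flag; the file names are placeholders):
 *
 *   cv::Mat prev = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
 *   cv::Mat curr = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
 *   cv::Mat transform = cv::Mat::eye(3, 3, CV_64F);   // caller allocates the transform
 *   bool poorRegistration = false;
 *
 *   InterframeRegister reg;
 *   reg.Register(prev, curr, transform, cv::Mat(), &poorRegistration);
 *   if (!poorRegistration) {
 *       // transform now holds the estimated inter-frame motion
 *   }
 */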
/**
 * Main function: runs the plugin based on its ID.
 * @param runID  plugin ID
 * @param imgC   image to be processed
 * @return the processed image container
 */
QSharedPointer<nmc::DkImageContainer> DkImageStitchingPlugin::runPlugin(
    const QString& /*runID*/, QSharedPointer<nmc::DkImageContainer> imgC) const
{
    QString dp = "";
    if (imgC)
        dp = imgC->fileInfo().absolutePath();

    QStringList files = QFileDialog::getOpenFileNames(
        DkPluginInterface::getMainWindow(), tr("Select photos"), dp);

    if (files.size() != 2)
        return imgC;

    // TODO: do NOT use highgui functions - nomacs is an image viewer, so we are
    // much better in loading images than opencv
    // NOTE: imgC is the currently loaded image, so you could use it as the
    // 'reference' image; however, for the final plugin a nice UI has to be done anyhow
    //cv::Mat reference = cv::imread(files[0].toStdString());
    //cv::Mat target = cv::imread(files[1].toStdString());

    // loading images using nomacs
    nmc::DkBasicLoader loader;
    loader.loadGeneral(files[0]);
    cv::Mat reference = DkImage::qImage2Mat(loader.image());

    loader.loadGeneral(files[1]);
    cv::Mat target = DkImage::qImage2Mat(loader.image());

    cv::Mat grayRef, grayTarget;
    cv::cvtColor(reference, grayRef, CV_BGR2GRAY);
    cv::cvtColor(target, grayTarget, CV_BGR2GRAY);

    // updated for opencv 3
    cv::Ptr<cv::Feature2D> f2d = cv::xfeatures2d::SIFT::create();

    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    //cv::SiftFeatureDetector detector;
    //detector.detect(grayRef,keypoints1);
    //detector.detect(grayTarget,keypoints2);
    f2d->detect(grayRef, keypoints1);
    f2d->detect(grayTarget, keypoints2);

    cv::Mat descriptor1, descriptor2;
    //cv::SiftDescriptorExtractor extractor;
    //extractor.compute(reference,keypoints1,descriptor1);
    //extractor.compute(target,keypoints2,descriptor2);
    f2d->compute(grayRef, keypoints1, descriptor1);
    f2d->compute(grayTarget, keypoints2, descriptor2);

    // TODO: this is pretty much spaghetti code https://en.wikipedia.org/wiki/Spaghetti_code
    // you should:
    // - split the code into different files (e.g. DkStitcher)
    // - make classes where appropriate
    // - split the code into functions
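    // The brute-force matching below keeps correspondences via a minimum-distance
    // heuristic. A commonly used alternative is Lowe's ratio test with knnMatch;
    // a minimal sketch (not part of the original code, shown only as an option):
    //
    //   cv::BFMatcher ratioMatcher(cv::NORM_L2);
    //   std::vector<std::vector<cv::DMatch>> knnMatches;
    //   ratioMatcher.knnMatch(descriptor1, descriptor2, knnMatches, 2);
    //   std::vector<cv::DMatch> goodMatches;
    //   for (const auto& m : knnMatches)
    //       if (m.size() == 2 && m[0].distance < 0.75f * m[1].distance)
    //           goodMatches.push_back(m[0]);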
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<cv::DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);

    if (matches.empty())
        return imgC;

    // smallest descriptor distance among all matches
    double minDist = matches[0].distance;
    for (int i = 1; i < (int)matches.size(); ++i) {
        if (matches[i].distance < minDist)
            minDist = matches[i].distance;
    }
    minDist += 0.1;

    // keep only reasonably good matches
    std::vector<cv::Point2f> queryPts;   // points in the reference image (keypoints1)
    std::vector<cv::Point2f> trainPts;   // points in the target image (keypoints2)
    for (int i = 0; i < (int)matches.size(); ++i) {
        if (matches[i].distance < 3.0*minDist) {
            int queryIdx = matches[i].queryIdx;
            int trainIdx = matches[i].trainIdx;
            queryPts.push_back(keypoints1[queryIdx].pt);
            trainPts.push_back(keypoints2[trainIdx].pt);
        }
    }

    ///Obtain the global homography (reference -> target) and its inliers
    std::vector<uchar> inliers_mask;
    cv::Mat globalH = cv::findHomography(queryPts, trainPts, inliers_mask, CV_RANSAC);

    std::vector<cv::Point2f> inliersRef;   // inlier coordinates in the reference image
    std::vector<cv::Point2f> inliersTgt;   // inlier coordinates in the target image
    for (int i = 0; i < (int)inliers_mask.size(); ++i) {
        if (inliers_mask[i]) {
            inliersRef.emplace_back(queryPts[i]);
            inliersTgt.emplace_back(trainPts[i]);
        }
    }

    ///Build the A matrix (DLT system) from the inlier correspondences
    cv::Mat A(2*(int)inliersRef.size(), 9, CV_32F);
    for (int i = 0; i < (int)inliersRef.size(); ++i) {
        const cv::Point2f &pRef = inliersRef[i];
        const cv::Point2f &pTgt = inliersTgt[i];

        A.at<float>(2*i,0) = 0.0f;
        A.at<float>(2*i,1) = 0.0f;
        A.at<float>(2*i,2) = 0.0f;
        A.at<float>(2*i,3) = -pRef.x;
        A.at<float>(2*i,4) = -pRef.y;
        A.at<float>(2*i,5) = -1.0f;
        A.at<float>(2*i,6) = pTgt.y*pRef.x;
        A.at<float>(2*i,7) = pTgt.y*pRef.y;
        A.at<float>(2*i,8) = pTgt.y;

        A.at<float>(2*i+1,0) = pRef.x;
        A.at<float>(2*i+1,1) = pRef.y;
        A.at<float>(2*i+1,2) = 1.0f;
        A.at<float>(2*i+1,3) = 0.0f;
        A.at<float>(2*i+1,4) = 0.0f;
        A.at<float>(2*i+1,5) = 0.0f;
        A.at<float>(2*i+1,6) = -pTgt.x*pRef.x;
        A.at<float>(2*i+1,7) = -pTgt.x*pRef.y;
        A.at<float>(2*i+1,8) = -pTgt.x;
    }
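    /* Note on the construction above: each inlier correspondence (x, y) in the
     * reference image and (x', y') in the target image contributes two rows of
     * the DLT constraint A*h = 0, where h is the flattened 3x3 homography:
     *
     *   [ 0   0   0   -x   -y   -1    y'x    y'y    y' ]
     *   [ x   y   1    0    0    0   -x'x   -x'y   -x' ]
     *
     * The solution is the right singular vector of (a weighted) A belonging to
     * the smallest singular value, which is what the per-cell SVD below
     * computes on Wi*A.
     */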
    ///Divide the reference image into CX*CY cells and calculate their
    ///local homographies.
    const int CX = 100;
    const int CY = 100;
    const int cellWidth  = (reference.cols+CX-1)/CX;
    const int cellHeight = (reference.rows+CY-1)/CY;
    const float sigmaSquared = 12.5*12.5;

    ///mark cells that contain at least one inlier (1 is an overlapped cell)
    std::vector<int> cellsType(CX*CY, 0);
    for (int i = 0; i < (int)inliersRef.size(); ++i) {
        const cv::Point2f &pt = inliersRef[i];
        int cellX = (int)(pt.x/cellWidth);
        int cellY = (int)(pt.y/cellHeight);
        assert(cellX >= 0 && cellX < CX && cellY >= 0 && cellY < CY);
        cellsType[cellY*CX+cellX] = 1;
    }

    std::vector<cv::Mat> localHomographies(CX*CY);
    cv::Mat Wi(2*(int)inliersRef.size(), 2*(int)inliersRef.size(), CV_32F, 0.0);

    for (int i = 0; i < CX; ++i) {       // i indexes the cell row
        for (int j = 0; j < CY; ++j) {   // j indexes the cell column
            int centerY = i*cellHeight;  // y coordinate of the cell origin
            int centerX = j*cellWidth;   // x coordinate of the cell origin

            ///Build the W matrix for this cell: the weight of each inlier
            ///decays with its distance from the cell origin
            for (int k = 0; k < (int)inliersRef.size(); ++k) {
                cv::Point2f xk = inliersRef[k];
                xk.x = centerX-xk.x;
                xk.y = centerY-xk.y;
                float w = exp(-1.0*sqrt(xk.x*xk.x+xk.y*xk.y)/sigmaSquared);
                Wi.at<float>(2*k,2*k)     = w;
                Wi.at<float>(2*k+1,2*k+1) = w;
            }

            ///Calculate the local homography for this cell: the right singular
            ///vector of Wi*A with the smallest singular value
            cv::Mat w, u, vt;
            cv::SVD::compute(Wi*A, w, u, vt);

            float smallestSv = w.at<float>(0,0);
            int indexSmallestSv = 0;
            for (int k = 1; k < w.rows; ++k) {
                if (w.at<float>(k,0) < smallestSv) {
                    smallestSv = w.at<float>(k,0);
                    indexSmallestSv = k;
                }
            }

            ///Represent the homography as a 3x3 matrix
            cv::Mat localH(3,3,CV_64F,0.0);
            for (int k = 0; k < 9; ++k)
                localH.at<double>(k/3,k%3) = vt.row(indexSmallestSv).at<float>(k);

            // normalize the sign (localH is CV_64F, so access it as double)
            if (localH.at<double>(2,2) < 0)
                localH *= -1;

            localHomographies[i*CY+j] = localH;
        }
    }

    ///Calculate canvas size using the global homography
    cv::Point2f canvasCorners[4];
    canvasCorners[0] = cv::Point2f(0,0);
    canvasCorners[1] = cv::Point2f(reference.cols,0);
    canvasCorners[2] = cv::Point2f(0,reference.rows);
    canvasCorners[3] = cv::Point2f(reference.cols,reference.rows);

    for (int i = 0; i < 4; ++i) {
        cv::Mat pSrc(3,1,CV_64F,1.0);
        pSrc.at<double>(0,0) = canvasCorners[i].x;
        pSrc.at<double>(1,0) = canvasCorners[i].y;

        cv::Mat pDst = globalH*pSrc;
        double w = pDst.at<double>(2,0);
        canvasCorners[i].x = 0.5+(pDst.at<double>(0,0)/w);
        canvasCorners[i].y = 0.5+(pDst.at<double>(1,0)/w);
    }

    int minX = floor(canvasCorners[0].x);
    int minY = floor(canvasCorners[0].y);
    int maxX = minX;
    int maxY = minY;
    for (int i = 1; i < 4; ++i) {
        minX = std::min(minX,(int)floor(canvasCorners[i].x));
        minY = std::min(minY,(int)floor(canvasCorners[i].y));
        maxX = std::max(maxX,(int)floor(canvasCorners[i].x));
        maxY = std::max(maxY,(int)floor(canvasCorners[i].y));
    }

    int canvasWidth  = std::max(target.cols,maxX)-minX;
    int canvasHeight = std::max(target.rows,maxY)-minY;
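    // For reference, the corner projection above could equivalently be written
    // with cv::perspectiveTransform; a minimal sketch (not part of the original
    // code, shown only as an option):
    //
    //   std::vector<cv::Point2f> corners = {
    //       cv::Point2f(0,0), cv::Point2f((float)reference.cols,0),
    //       cv::Point2f(0,(float)reference.rows),
    //       cv::Point2f((float)reference.cols,(float)reference.rows) };
    //   std::vector<cv::Point2f> projected;
    //   cv::perspectiveTransform(corners, projected, globalH);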
    ///Calculate the translation that positions the warped reference image on the canvas
    cv::Mat T = cv::Mat::eye(3,3,CV_64F);
    if (minX < 0)
        T.at<double>(0,2) = -minX;
    else
        canvasWidth += minX;
    if (minY < 0)
        T.at<double>(1,2) = -minY;
    else
        canvasHeight += minY;

    cv::Mat globalTH = T*globalH;

    ///Warp the reference image cell by cell onto the canvas using the local homographies
    cv::Mat result(canvasHeight, canvasWidth, CV_8UC3, cv::Scalar(0,0,0));
    for (int i = 0; i < CX; ++i) {
        for (int j = 0; j < CY; ++j) {
            for (int k = 0; k < cellHeight; ++k) {
                int pX = i*cellHeight+k;    // row in the reference image
                if (pX >= reference.rows)
                    break;
                for (int l = 0; l < cellWidth; ++l) {
                    int pY = j*cellWidth+l; // column in the reference image
                    if (pY >= reference.cols)
                        break;

                    cv::Mat ptSrc(3,1,CV_64F,1.0);
                    ptSrc.at<double>(0,0) = pY;
                    ptSrc.at<double>(1,0) = pX;

                    cv::Mat ptDst = (T*localHomographies[i*CY+j])*ptSrc;
                    ptDst /= ptDst.at<double>(2,0);

                    int hX = ptDst.at<double>(0,0);
                    int hY = ptDst.at<double>(1,0);
                    if (hX >= 0 && hX < canvasWidth && hY >= 0 && hY < canvasHeight)
                        result.at<cv::Vec3b>(hY,hX) = reference.at<cv::Vec3b>(pX,pY);
                }
            }
        }
    }

    ///Copy the target image directly onto the canvas
    cv::Mat half(result, cv::Rect(std::max(0,-minX), std::max(0,-minY), target.cols, target.rows));
    target.copyTo(half);

    cv::cvtColor(result, result, CV_BGR2RGB);

    if (!imgC)  // TODO: note, the constructor's input _should be_ the filepath not some name!
        imgC = QSharedPointer<nmc::DkImageContainer>(new nmc::DkImageContainer(QString("panoramic")));

    // TODO: add a useful edit name (e.g. Stitching) and remove the empty quote of filepath
    imgC->setImage(nmc::DkImage::mat2QImage(result), "");

    return imgC;
}
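// A minimal sketch of the refactoring suggested in the TODO above, moving the
// stitching steps out of runPlugin() into a DkStitcher class. The interface is
// an assumption for illustration, not part of the existing plugin.
class DkStitcher {
public:
    // detects SIFT features, matches them and estimates the global homography
    // mapping the reference image into the target image
    cv::Mat computeGlobalHomography(const cv::Mat& reference, const cv::Mat& target);

    // estimates one local homography per cell of the reference image,
    // weighting the DLT equations by the distance to the cell
    std::vector<cv::Mat> computeLocalHomographies(const cv::Mat& reference,
                                                  const std::vector<cv::Point2f>& inliersRef,
                                                  const std::vector<cv::Point2f>& inliersTgt) const;

    // warps the reference cell by cell onto the stitching canvas and
    // composites the target image on top
    cv::Mat stitch(const cv::Mat& reference, const cv::Mat& target);
};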