void detectNewImageFeatures( const cv::Mat &image,
                             const VectorImageFeaturePrediction &featuresPrediction,
                             uint newImageFeaturesMaxSize,
                             VectorImageFeatureMeasurement &newImageFeatures )
{
    newImageFeatures.clear();

    // Compute the size of the image zones from the configured parameters
    ConfigurationManager& configManager = ConfigurationManager::getInstance();
    //int zonesInARow = exp2f(configManager.ekfParams->detectNewFeaturesImageAreasDivideTimes);
    int zonesInARow = 1;
    int zoneWidth = image.cols / zonesInARow;
    int zoneHeight = image.rows / zonesInARow;

    // Build the mask so that features are only searched for in sparsely populated zones
    cv::Mat imageMask( cv::Mat::ones(image.rows, image.cols, CV_8UC1) * 255 );
    buildImageMask( featuresPrediction, imageMask );

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // Special case: the number of detected features does not exceed the requested amount
    size_t imageKeypointsSize = imageKeypoints.size();

#ifdef DEBUG
    std::cout << "Number of features detected while adding new ones: " << imageKeypointsSize << std::endl;
#endif

    if (imageKeypointsSize <= newImageFeaturesMaxSize)
    {
        double imagePos[2];
        for (size_t i = 0; i < imageKeypointsSize; ++i)
        {
            cv::KeyPoint &currKeypoint = imageKeypoints[i];
            imagePos[0] = currKeypoint.pt.x;
            imagePos[1] = currKeypoint.pt.y;

            newImageFeatures.push_back( new ImageFeatureMeasurement( imagePos, descriptors.row(i) ) );
        }
    }
    else
    {
        // Search for new features, trying to keep them
        // as evenly distributed over the image as possible
        searchFeaturesByZone( featuresPrediction, imageKeypoints, descriptors,
                              zonesInARow, zoneWidth, zoneHeight,
                              imageMask, newImageFeaturesMaxSize, newImageFeatures );
    }

#ifdef DEBUG_SHOW_NEW_FEATURES
    cv::Mat imageCopy;
    image.copyTo(imageCopy);

    // Draw the zone grid
    for (int i = 1; i < zonesInARow; ++i)
    {
        cv::line(imageCopy, cv::Point(i * zoneWidth, 0), cv::Point(i * zoneWidth, imageCopy.rows), cv::Scalar(0, 255, 0));
        cv::line(imageCopy, cv::Point(0, i * zoneHeight), cv::Point(imageCopy.cols, i * zoneHeight), cv::Scalar(0, 255, 0));
    }

    size_t featuresPredictionSize = featuresPrediction.size();
    for (size_t i = 0; i < featuresPredictionSize; ++i)
    {
        ImageFeaturePrediction *currFeaturePrediction = featuresPrediction[i];

        drawUncertaintyEllipse2D( imageCopy,
                                  cv::Point2f(currFeaturePrediction->imagePos[0], currFeaturePrediction->imagePos[1]),
                                  currFeaturePrediction->covarianceMatrix,
                                  2 * (image.cols + image.rows),
                                  cv::Scalar(0, 255, 0),
                                  false );
    }

    cv::namedWindow("New feature search: mask");
    cv::imshow("New feature search: mask", imageMask);
    cv::waitKey(0);

    for (size_t i = 0; i < newImageFeatures.size(); ++i)
    {
        drawPoint(imageCopy, newImageFeatures[i]->imagePos, cv::Scalar(0, 255, 255));
    }

    cv::namedWindow("New feature search: image with new features");
    cv::imshow("New feature search: image with new features", imageCopy);
    cv::waitKey(0);

    // Destroy all created windows
    cv::destroyWindow("New feature search: mask");
    cv::destroyWindow("New feature search: image with new features");
#endif
}
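// Note: buildImageMask is called above but not shown in this listing. Below is a
// minimal sketch of what it plausibly does, inferred from its call site and from how
// drawUncertaintyEllipse2D is used on a mask in matchPredictedFeatures further down:
// it blacks out each predicted feature's uncertainty ellipse so the detector only
// fires in uncovered areas. The ellipse-size and color arguments are assumptions,
// not the original code.
void buildImageMask( const VectorImageFeaturePrediction &featuresPrediction,
                     cv::Mat &imageMask )
{
    size_t featuresPredictionSize = featuresPrediction.size();
    for (size_t i = 0; i < featuresPredictionSize; ++i)
    {
        ImageFeaturePrediction *prediction = featuresPrediction[i];

        // Fill the predicted feature's uncertainty ellipse with black (0),
        // masking that region out of the subsequent feature detection.
        drawUncertaintyEllipse2D( imageMask,
                                  cv::Point2d(prediction->imagePos[0], prediction->imagePos[1]),
                                  prediction->covarianceMatrix,
                                  2 * (imageMask.cols + imageMask.rows),
                                  cv::Scalar(0),
                                  true );
    }
}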
int main()
{
    // Read input image
    cv::Mat image= cv::imread("../images/group.jpg");
    if (!image.data)
        return 0;

    // Display the image
    cv::namedWindow("Original Image");
    cv::imshow("Original Image",image);

    // Get the binary map
    cv::Mat binary;
    binary= cv::imread("../images/binary.bmp",0);

    // Display the binary image
    cv::namedWindow("Binary Image");
    cv::imshow("Binary Image",binary);

    // Eliminate noise and smaller objects
    cv::Mat fg;
    cv::erode(binary,fg,cv::Mat(),cv::Point(-1,-1),6);

    // Display the foreground image
    cv::namedWindow("Foreground Image");
    cv::imshow("Foreground Image",fg);

    // Identify image pixels without objects
    cv::Mat bg;
    cv::dilate(binary,bg,cv::Mat(),cv::Point(-1,-1),6);
    cv::threshold(bg,bg,1,128,cv::THRESH_BINARY_INV);

    // Display the background image
    cv::namedWindow("Background Image");
    cv::imshow("Background Image",bg);

    // Show markers image
    cv::Mat markers(binary.size(),CV_8U,cv::Scalar(0));
    markers= fg+bg;
    cv::namedWindow("Markers");
    cv::imshow("Markers",markers);

    // Create watershed segmentation object
    WatershedSegmenter segmenter;

    // Set markers and process
    segmenter.setMarkers(markers);
    segmenter.process(image);

    // Display segmentation result
    cv::namedWindow("Segmentation");
    cv::imshow("Segmentation",segmenter.getSegmentation());

    // Display watersheds
    cv::namedWindow("Watersheds");
    cv::imshow("Watersheds",segmenter.getWatersheds());

    // Open another image
    image= cv::imread("../images/tower.jpg");
    if (!image.data)
        return 0;

    // Identify background pixels
    cv::Mat imageMask(image.size(),CV_8U,cv::Scalar(0));
    cv::rectangle(imageMask,cv::Point(5,5),cv::Point(image.cols-5,image.rows-5),cv::Scalar(255),3);
    // Identify foreground pixels (in the middle of the image)
    cv::rectangle(imageMask,cv::Point(image.cols/2-10,image.rows/2-10),
                  cv::Point(image.cols/2+10,image.rows/2+10),cv::Scalar(1),10);

    // Set markers and process
    segmenter.setMarkers(imageMask);
    segmenter.process(image);

    // Display the image with markers
    cv::rectangle(image,cv::Point(5,5),cv::Point(image.cols-5,image.rows-5),cv::Scalar(255,255,255),3);
    cv::rectangle(image,cv::Point(image.cols/2-10,image.rows/2-10),
                  cv::Point(image.cols/2+10,image.rows/2+10),cv::Scalar(1,1,1),10);
    cv::namedWindow("Image with marker");
    cv::imshow("Image with marker",image);

    // Display watersheds
    cv::namedWindow("Watersheds of foreground object");
    cv::imshow("Watersheds of foreground object",segmenter.getWatersheds());

    // Open another image
    image= cv::imread("../images/tower.jpg");

    // define bounding rectangle
    cv::Rect rectangle(50,70,image.cols-150,image.rows-180);

    cv::Mat result; // segmentation result (4 possible values)
    cv::Mat bgModel,fgModel; // the models (internally used)
    // GrabCut segmentation
    cv::grabCut(image,    // input image
                result,   // segmentation result
                rectangle,// rectangle containing foreground
                bgModel,fgModel, // models
                1,        // number of iterations
                cv::GC_INIT_WITH_RECT); // use rectangle

    // Get the pixels marked as likely foreground
    cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);
    // Generate output image
    cv::Mat foreground(image.size(),CV_8UC3,cv::Scalar(255,255,255));
    image.copyTo(foreground,result); // bg pixels not copied

    // draw rectangle on original image
    cv::rectangle(image, rectangle, cv::Scalar(255,255,255),1);
    cv::namedWindow("Image");
    cv::imshow("Image",image);

    // display result
    cv::namedWindow("Segmented Image");
    cv::imshow("Segmented Image",foreground);

    // Open another image
    image= cv::imread("../images/group.jpg");

    // define bounding rectangle
    cv::Rect rectangle2(10,100,380,180);

    cv::Mat bkgModel,fgrModel; // the models (internally used)
    // GrabCut segmentation
    cv::grabCut(image,  // input image
                result, // segmentation result
                rectangle2,bkgModel,fgrModel,5,cv::GC_INIT_WITH_RECT);

    // Get the pixels marked as likely foreground
    // cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);
    result= result&1;
    foreground.create(image.size(),CV_8UC3);
    foreground.setTo(cv::Scalar(255,255,255));
    image.copyTo(foreground,result); // bg pixels not copied

    // draw rectangle on original image
    cv::rectangle(image, rectangle2, cv::Scalar(255,255,255),1);
    cv::namedWindow("Image 2");
    cv::imshow("Image 2",image);

    // display result
    cv::namedWindow("Foreground objects");
    cv::imshow("Foreground objects",foreground);

    cv::waitKey();
    return 0;
}
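// Note: the WatershedSegmenter class used by main() above is not included in this
// listing. The sketch below is a minimal implementation consistent with that usage
// (setMarkers/process/getSegmentation/getWatersheds); in a real build it would have
// to be defined before main(). It is a thin wrapper around cv::watershed, which
// expects a 32-bit signed marker image and writes -1 at watershed boundaries.
class WatershedSegmenter {

  private:
    cv::Mat markers;

  public:
    void setMarkers(const cv::Mat &markerImage) {
        // cv::watershed requires CV_32S markers
        markerImage.convertTo(markers, CV_32S);
    }

    cv::Mat process(const cv::Mat &image) {
        // Flood the image starting from the markers; boundary pixels become -1
        cv::watershed(image, markers);
        return markers;
    }

    cv::Mat getSegmentation() {
        cv::Mat tmp;
        // Convert labels back to 8-bit; labels above 255 saturate to 255
        markers.convertTo(tmp, CV_8U);
        return tmp;
    }

    cv::Mat getWatersheds() {
        cv::Mat tmp;
        // Map boundary pixels (-1) to 0 and everything else to white
        markers.convertTo(tmp, CV_8U, 255, 255);
        return tmp;
    }
};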
void matchPredictedFeatures( const cv::Mat &image,
                             const VectorMapFeature &features,
                             const VectorImageFeaturePrediction &vectorfeaturePrediction,
                             VectorFeatureMatch &matches )
{
    ConfigurationManager &configManager = ConfigurationManager::getInstance();

    cv::Mat imageMask( cv::Mat::zeros(image.rows, image.cols, CV_8UC1) );

    size_t vectorfeaturePredictionSize = vectorfeaturePrediction.size();

    // Build the mask to avoid searching for features outside the predicted features' ellipses
    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];

        drawUncertaintyEllipse2D( imageMask,
                                  cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
                                  featurePrediction->covarianceMatrix,
                                  2.0 * MAX(configManager.cameraCalibration->pixelsX, configManager.cameraCalibration->pixelsY),
                                  ellipseColorWhite,
                                  true );
    }

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // IMPORTANT: the size of imageKeypoints MUST be taken after the descriptors are
    // extracted, since compute() may remove keypoints for which a descriptor could
    // not be computed.
    size_t imageKeypointsSize = imageKeypoints.size();

    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        cv::Mat mask(cv::Mat::zeros(1, descriptors.rows, CV_8UC1));

        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];
        MapFeature *currMapFeature = features[featurePrediction->featureIndex];

        cv::Size2f axesSize(0.0f, 0.0f);
        double angle = 0.0;
        matrix2x2ToUncertaintyEllipse2D(featurePrediction->covarianceMatrix, axesSize, angle);

        for (size_t j = 0; j < imageKeypointsSize; ++j)
        {
            // If the keypoint falls inside the ellipse, add it as a candidate
            if ( pointIsInsideEllipse( imageKeypoints[j].pt,
                                       cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
                                       axesSize,
                                       angle ) )
            {
                mask.at<uchar>(0, j) = 255; // nonzero: allow this descriptor as a match candidate
            }
        }

        std::vector<cv::DMatch> currFeatureMatch;
        matchICDescriptors( currMapFeature->descriptor, descriptors, currFeatureMatch, mask );

        // If a match was found, add it to the result vector
        if (currFeatureMatch.size() > 0)
        {
            cv::KeyPoint &matchedKeypoint = imageKeypoints[currFeatureMatch[0].trainIdx];

            FeatureMatch *newMatch = new FeatureMatch;
            newMatch->featureIndex = featurePrediction->featureIndex;
            newMatch->imagePos[0] = static_cast<double>(matchedKeypoint.pt.x);
            newMatch->imagePos[1] = static_cast<double>(matchedKeypoint.pt.y);
            newMatch->distance = currFeatureMatch[0].distance;
            descriptors.row(currFeatureMatch[0].trainIdx).copyTo(newMatch->imagePosDescriptor);

            matches.push_back(newMatch);
        }
    }
}
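// Note: pointIsInsideEllipse is also not shown. The sketch below is the standard
// point-in-ellipse test matching the call above: rotate the point into the ellipse's
// own frame and evaluate the normalized ellipse equation. It assumes axesSize holds
// the semi-axis lengths and that angle is in radians; both are assumptions about the
// original helper, not confirmed by this listing. Requires <cmath>.
bool pointIsInsideEllipse( const cv::Point2f &point,
                           const cv::Point2d &center,
                           const cv::Size2f &axesSize,
                           double angle )
{
    // Translate so the ellipse is centered at the origin
    double dx = point.x - center.x;
    double dy = point.y - center.y;

    // Rotate by -angle to align the ellipse axes with the coordinate axes
    double cosA = std::cos(angle);
    double sinA = std::sin(angle);
    double xr =  cosA * dx + sinA * dy;
    double yr = -sinA * dx + cosA * dy;

    // Inside (or on) the ellipse iff (xr/a)^2 + (yr/b)^2 <= 1
    double a = axesSize.width;
    double b = axesSize.height;
    return (xr * xr) / (a * a) + (yr * yr) / (b * b) <= 1.0;
}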