void buildImageMask( const VectorImageFeaturePrediction &inlierPredictions, cv::Mat &imageMask )
{
    // Mask out the region around each prediction
    size_t inlierPredictionsSize = inlierPredictions.size();
    cv::Scalar colorBlack(0, 0, 0);

    // For each prediction, draw its uncertainty ellipse (filled) onto the mask.
    // This prevents new features from being added inside the search ellipses
    // of previously tracked features.
    for (size_t i = 0; i < inlierPredictionsSize; ++i)
    {
        ImageFeaturePrediction *inlierPrediction = inlierPredictions[i];

        drawUncertaintyEllipse2D(
            imageMask,
            cv::Point2d(inlierPrediction->imagePos[0], inlierPrediction->imagePos[1]),
            inlierPrediction->covarianceMatrix,
            2 * (imageMask.rows + imageMask.cols),
            colorBlack,
            true
        );
    }
}
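// Note: drawUncertaintyEllipse2D is defined elsewhere in the project. Judging only from its
// call sites here, it rasterizes the ellipse described by a 2x2 covariance matrix onto the
// given image, clipped to a maximum axis length and optionally filled. The helper below is
// an illustrative sketch of that idea (hypothetical name, assumed 2-sigma scaling, assumes
// <cmath> and <algorithm> are available); it is not the project's implementation.
static void sketchDrawCovarianceEllipse( cv::Mat &img, const cv::Point2d &center,
                                         const cv::Mat &cov2x2, double maxAxisLength,
                                         const cv::Scalar &color, bool filled )
{
    // Eigen-decompose the (symmetric) covariance: eigenvalues give the squared semi-axes,
    // the first eigenvector gives the orientation of the major axis.
    cv::Mat eigenvalues, eigenvectors;
    cv::eigen(cov2x2, eigenvalues, eigenvectors);

    double a = std::min(2.0 * std::sqrt(std::max(eigenvalues.at<double>(0), 0.0)), maxAxisLength);
    double b = std::min(2.0 * std::sqrt(std::max(eigenvalues.at<double>(1), 0.0)), maxAxisLength);
    double angleDeg = std::atan2(eigenvectors.at<double>(0, 1), eigenvectors.at<double>(0, 0)) * 180.0 / CV_PI;

    cv::ellipse(img, cv::Point(cvRound(center.x), cvRound(center.y)),
                cv::Size(cvRound(a), cvRound(b)), angleDeg, 0.0, 360.0,
                color, filled ? -1 : 1); // thickness -1 draws a filled ellipse
}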
void detectNewImageFeatures(
    const cv::Mat image,
    const VectorImageFeaturePrediction &featuresPrediction,
    uint newImageFeaturesMaxSize,
    VectorImageFeatureMeasurement &newImageFeatures )
{
    newImageFeatures.clear();

    // Compute the size of the image zones according to the configured parameters
    ConfigurationManager& configManager = ConfigurationManager::getInstance();
    //int zonesInARow = exp2f(configManager.ekfParams->detectNewFeaturesImageAreasDivideTimes);
    int zonesInARow = 1;
    int zoneWidth = image.cols / zonesInARow;
    int zoneHeight = image.rows / zonesInARow;

    // Build the mask so that features are only searched for in sparsely populated areas
    cv::Mat imageMask( cv::Mat::ones(image.rows, image.cols, CV_8UC1) * 255 );
    buildImageMask( featuresPrediction, imageMask );

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // Special case: the number of detected features does not exceed the number requested
    size_t imageKeypointsSize = imageKeypoints.size();

#ifdef DEBUG
    std::cout << "Number of features detected when adding new ones: " << imageKeypointsSize << std::endl;
#endif

    if (imageKeypointsSize <= newImageFeaturesMaxSize)
    {
        double imagePos[2];
        for (size_t i = 0; i < imageKeypointsSize; ++i)
        {
            cv::KeyPoint &currKeypoint = imageKeypoints[i];
            imagePos[0] = currKeypoint.pt.x;
            imagePos[1] = currKeypoint.pt.y;

            newImageFeatures.push_back( new ImageFeatureMeasurement( imagePos, descriptors.row(i) ) );
        }
    }
    else
    {
        // Look for new features, trying to keep them
        // as evenly distributed over the image as possible
        searchFeaturesByZone(
            featuresPrediction, imageKeypoints, descriptors,
            zonesInARow, zoneWidth, zoneHeight,
            imageMask,
            newImageFeaturesMaxSize, newImageFeatures
        );
    }

#ifdef DEBUG_SHOW_NEW_FEATURES
    cv::Mat imageCopy;
    image.copyTo(imageCopy);

    for (int i = 1; i < zonesInARow; ++i)
    {
        cv::line(imageCopy, cv::Point(i * zoneWidth, 0), cv::Point(i * zoneWidth, imageCopy.rows), cv::Scalar(0, 255, 0));
        cv::line(imageCopy, cv::Point(0, i * zoneHeight), cv::Point(imageCopy.cols, i * zoneHeight), cv::Scalar(0, 255, 0));
    }

    size_t featuresPredictionSize = featuresPrediction.size();
    for (size_t i = 0; i < featuresPredictionSize; ++i)
    {
        ImageFeaturePrediction *currFeaturePrediction = featuresPrediction[i];

        drawUncertaintyEllipse2D(
            imageCopy,
            cv::Point2f(currFeaturePrediction->imagePos[0], currFeaturePrediction->imagePos[1]),
            currFeaturePrediction->covarianceMatrix,
            2 * (image.cols + image.rows),
            cv::Scalar(0, 255, 0),
            false
        );
    }

    cv::namedWindow("New feature search: mask");
    cv::imshow("New feature search: mask", imageMask);
    cv::waitKey(0);

    for (size_t i = 0; i < newImageFeatures.size(); ++i)
    {
        drawPoint(imageCopy, newImageFeatures[i]->imagePos, cv::Scalar(0, 255, 255));
    }

    cv::namedWindow("New feature search: image with new features");
    cv::imshow("New feature search: image with new features", imageCopy);
    cv::waitKey(0);

    // Destroy all the windows that were created
    cv::destroyWindow("New feature search: mask");
    cv::destroyWindow("New feature search: image with new features");
#endif
}
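// Ownership note (assumption): VectorImageFeatureMeasurement appears to hold raw pointers
// (elements are created with new above and dereferenced with -> below), so the caller of
// detectNewImageFeatures owns the returned measurements and must release them once they
// have been consumed. A small hypothetical helper for that cleanup could look like this;
// it is only a sketch, not part of the original API.
static void releaseImageFeatureMeasurements( VectorImageFeatureMeasurement &measurements )
{
    for (size_t i = 0; i < measurements.size(); ++i)
    {
        delete measurements[i];
    }
    measurements.clear();
}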
void searchFeaturesByZone(
    const VectorImageFeaturePrediction &featuresPrediction,
    std::vector<cv::KeyPoint> &imageKeypoints,
    cv::Mat &descriptors,
    int zonesInARow, int zoneWidth, int zoneHeight,
    cv::Mat& imageMask,
    int imageFeaturesMaxSize,
    VectorImageFeatureMeasurement &newImageFeatures )
{
    int zonesCount = zonesInARow * zonesInARow;

    // Initialize the ZoneInfo for each zone
    ZoneInfo **zonesInfo = new ZoneInfo *[zonesCount];
    for (int i = 0; i < zonesCount; ++i)
    {
        ZoneInfo *currZoneInfo = new ZoneInfo;
        currZoneInfo->candidateImageFeatureMeasurementLeft = 0;
        currZoneInfo->predictionsPlusFeaturesCount = 0;
        currZoneInfo->zoneId = i;

        zonesInfo[i] = currZoneInfo;
    }

    groupImageFeaturesAndPredictionsByZone(
        featuresPrediction, imageKeypoints, descriptors,
        zoneWidth, zoneHeight, imageMask.cols, imageMask.rows,
        zonesCount, zonesInfo
    );

    // Sort the ZoneInfo array by the number of predictions whose mean falls inside each zone
    qsort(zonesInfo, zonesCount, sizeof(ZoneInfo *), &sortCompare);

    // Convert the ZoneInfo array into a list so that removals are faster
    std::list< ZoneInfo * > zoneInfoList;
    for (int i = 0; i < zonesCount; ++i)
    {
        zoneInfoList.push_back( zonesInfo[i] );
    }

    // Add the features to the result
    newImageFeatures.reserve(imageFeaturesMaxSize);

    double ellipseSize = ConfigurationManager::getInstance().ekfParams->detectNewFeaturesImageMaskEllipseSize;
    Matd newFeatMeasToAddEllipse( (Matd(2, 2) << ellipseSize, 0.0, 0.0, ellipseSize) );

    int zonesLeft = zonesCount;
    while (zonesLeft > 0 && imageFeaturesMaxSize > 0)
    {
        ZoneInfo *currZoneInfo = zoneInfoList.front();
        int &currZoneFeaturesLeft = currZoneInfo->candidateImageFeatureMeasurementLeft;

        // If the zone has no features left, remove it from the list
        if (currZoneFeaturesLeft == 0)
        {
            zoneInfoList.pop_front();

#ifdef DEBUG_SHOW_NEW_FEATURES
            std::cout << "Zone " << currZoneInfo->zoneId
                      << " was removed because it has no candidate features left."
                      << std::endl;
            std::cout << "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" << std::endl << std::endl;
#endif
            zonesLeft--;
        }
        else
        {
            // Pick a candidate feature at random
            int randKeypointIdx = rand() % currZoneFeaturesLeft;
            ImageFeatureMeasurement *imFeatMeasToAdd = currZoneInfo->candidateImageFeatureMeasurement[randKeypointIdx];

            int imFeatMeasToAddX = static_cast<int>(imFeatMeasToAdd->imagePos[0]);
            int imFeatMeasToAddY = static_cast<int>(imFeatMeasToAdd->imagePos[1]);

            if ( imageMask.at<uchar>(imFeatMeasToAddY, imFeatMeasToAddX) )
            {
                // Add the feature to the result using the copy constructor.
                // Here the descriptor really is copied, so it can be returned.
                newImageFeatures.push_back( new ImageFeatureMeasurement(*imFeatMeasToAdd) );
                currZoneInfo->predictionsPlusFeaturesCount++;

#ifdef DEBUG_SHOW_NEW_FEATURES
                std::cout << "Added a feature in zone " << currZoneInfo->zoneId << std::endl;
                std::cout << "There are now " << currZoneInfo->predictionsPlusFeaturesCount << " predictions and features" << std::endl;
                std::cout << "There are " << currZoneInfo->candidateImageFeatureMeasurementLeft << " candidate features left to add" << std::endl;
                std::cout << "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" << std::endl << std::endl;
#endif

                // Re-sort the list
                std::list< ZoneInfo * >::iterator itCurr = zoneInfoList.begin();
                std::list< ZoneInfo * >::iterator itNext = zoneInfoList.begin();
                std::list< ZoneInfo * >::iterator itEnd = zoneInfoList.end();
                itNext++;

                // Keep traversing
                while (itNext != itEnd)
                {
                    ZoneInfo *nextZoneInfo = static_cast<ZoneInfo *>(*itNext);

                    // If the number of features and predictions in the current zone is
                    // greater than or equal to that of the next zone in the list, swap the
                    // current zone with the next one, to keep the zones totally ordered
                    // with respect to their number of features
                    if (currZoneInfo->predictionsPlusFeaturesCount >= nextZoneInfo->predictionsPlusFeaturesCount)
                    {
                        (*itNext) = currZoneInfo;
                        (*itCurr) = nextZoneInfo;
                    }
                    else
                    {
                        break;
                    }

                    itCurr++;
                    itNext++;
                }

                // Update the mask
                drawUncertaintyEllipse2D(
                    imageMask,
                    cv::Point2d(imFeatMeasToAdd->imagePos[0], imFeatMeasToAdd->imagePos[1]),
                    newFeatMeasToAddEllipse,
                    2 * (imageMask.cols + imageMask.rows),
                    cv::Scalar(0, 0, 0),
                    true
                );

                // Update the number of features still to be found
                imageFeaturesMaxSize--;
            }

            // Make sure the same feature cannot be picked again
            currZoneFeaturesLeft--;
            currZoneInfo->candidateImageFeatureMeasurement[randKeypointIdx] =
                currZoneInfo->candidateImageFeatureMeasurement[currZoneFeaturesLeft];

            delete imFeatMeasToAdd;
        }
    }

    // Release all the auxiliary memory that was used
    for (int i = 0; i < zonesCount; ++i)
    {
        ZoneInfo *currZoneInfo = zonesInfo[i];

        for (int j = 0; j < currZoneInfo->candidateImageFeatureMeasurementLeft; ++j)
        {
            delete currZoneInfo->candidateImageFeatureMeasurement[j];
        }

        delete currZoneInfo;
    }

    delete [] zonesInfo;
}
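// Assumptions made explicit: ZoneInfo and sortCompare are defined elsewhere in the project.
// From the way searchFeaturesByZone uses them, ZoneInfo needs at least a zone id, the array
// of candidate measurements with its remaining count, and the running count of predictions
// plus accepted features. The comparator should order zones by that count in ascending
// order, since the zone with the fewest predictions/features is always taken from the front
// of the list and the swap loop above keeps the list sorted that way. The sketch below is
// consistent with that usage (hypothetical names, not the project's actual definitions).
struct ZoneInfoSketch
{
    int zoneId;
    ImageFeatureMeasurement **candidateImageFeatureMeasurement; // candidate keypoints in this zone
    int candidateImageFeatureMeasurementLeft;                   // how many candidates remain
    int predictionsPlusFeaturesCount;                           // predictions + newly accepted features
};

// qsort comparator: zones with fewer predictions/features come first (ascending order)
static int sortCompareSketch( const void *a, const void *b )
{
    const ZoneInfoSketch *zoneA = *static_cast<const ZoneInfoSketch * const *>(a);
    const ZoneInfoSketch *zoneB = *static_cast<const ZoneInfoSketch * const *>(b);
    return zoneA->predictionsPlusFeaturesCount - zoneB->predictionsPlusFeaturesCount;
}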
void matchPredictedFeatures(
    const cv::Mat& image,
    const VectorMapFeature& features,
    const VectorImageFeaturePrediction& vectorfeaturePrediction,
    VectorFeatureMatch &matches )
{
    ConfigurationManager &configManager = ConfigurationManager::getInstance();

    cv::Mat imageMask( cv::Mat::zeros(image.rows, image.cols, CV_8UC1) );
    size_t vectorfeaturePredictionSize = vectorfeaturePrediction.size();

    // Build the mask, to avoid searching for features outside the ellipses of the predicted features
    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];

        drawUncertaintyEllipse2D(
            imageMask,
            cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
            featurePrediction->covarianceMatrix,
            2.0 * MAX(configManager.cameraCalibration->pixelsX, configManager.cameraCalibration->pixelsY),
            ellipseColorWhite,
            true
        );
    }

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // IMPORTANT: the size of imageKeypoints MUST be taken after extracting descriptors,
    // since that call may remove keypoints for which a descriptor could not be computed.
    size_t imageKeypointsSize = imageKeypoints.size();

    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        cv::Mat mask(cv::Mat::zeros(1, descriptors.rows, CV_8UC1));

        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];
        MapFeature *currMapFeature = features[featurePrediction->featureIndex];

        cv::Size2f axesSize(0.0f, 0.0f);
        double angle = 0.0;
        matrix2x2ToUncertaintyEllipse2D(featurePrediction->covarianceMatrix, axesSize, angle);

        for (size_t j = 0; j < imageKeypointsSize; ++j)
        {
            // If the keypoint falls inside the ellipse, add it as a possible candidate
            if ( pointIsInsideEllipse(
                     imageKeypoints[j].pt,
                     cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
                     axesSize,
                     angle ) )
            {
                mask.at<uchar>(0, j) = true;
            }
        }

        std::vector<cv::DMatch> currFeatureMatch;
        matchICDescriptors( currMapFeature->descriptor, descriptors, currFeatureMatch, mask );

        // If a match was found, add it to the result vector
        if (currFeatureMatch.size() > 0)
        {
            cv::KeyPoint& matchedKeypoint = imageKeypoints[currFeatureMatch[0].trainIdx];

            // Add a match to the result
            FeatureMatch *newMatch = new FeatureMatch;
            newMatch->featureIndex = featurePrediction->featureIndex;
            newMatch->imagePos[0] = static_cast<double>(matchedKeypoint.pt.x);
            newMatch->imagePos[1] = static_cast<double>(matchedKeypoint.pt.y);
            newMatch->distance = currFeatureMatch[0].distance;
            descriptors.row(currFeatureMatch[0].trainIdx).copyTo(newMatch->imagePosDescriptor);

            matches.push_back(newMatch);
        }
    }
}
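// Illustrative sketch (assumption): pointIsInsideEllipse and matrix2x2ToUncertaintyEllipse2D
// are helpers defined elsewhere. The standard test used above amounts to rotating the point
// into the ellipse frame and checking the normalized distance. The hypothetical function
// below shows that test, assuming axesSize holds the semi-axis lengths, angle is given in
// radians, and <cmath> is available; the project's helpers may use different conventions.
static bool sketchPointIsInsideEllipse( const cv::Point2f &point, const cv::Point2d &center,
                                        const cv::Size2f &axesSize, double angle )
{
    // Translate to the ellipse center, then rotate by -angle to align with the ellipse axes
    double dx = point.x - center.x;
    double dy = point.y - center.y;
    double xAligned =  dx * std::cos(angle) + dy * std::sin(angle);
    double yAligned = -dx * std::sin(angle) + dy * std::cos(angle);

    // Inside (or on) the ellipse when (x/a)^2 + (y/b)^2 <= 1
    double a = axesSize.width;
    double b = axesSize.height;
    return (xAligned * xAligned) / (a * a) + (yAligned * yAligned) / (b * b) <= 1.0;
}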