Example #1
// --------------------------------------------------------------------------------------------------------------------
// Rescues the outliers that are considered good.
// IMPORTANT: Assumes that outlierMatches, outlierMatchFeaturePrediction and the (externally provided)
// outlierMatchFeaturePredictionJacobians have the same number of elements and correspond to each other position by position.
// --------------------------------------------------------------------------------------------------------------------
void rescueOutliers(const VectorFeatureMatch &outlierMatches,
                    const VectorImageFeaturePrediction &outlierMatchFeaturePrediction,
                    const VectorMatd &outlierMatchFeaturePredictionJacobians,
                    VectorFeatureMatch &rescuedMatches,
                    VectorImageFeaturePrediction &rescuedPredictions,
                    VectorMatd &rescuedJacobians)
{
    ExtendedKalmanFilterParameters *ekfParams = ConfigurationManager::getInstance().ekfParams;
    std::vector<int> rescuedMatchesIndexes;

    size_t outlierMatchesSize = outlierMatches.size();
    for (uint i = 0; i < outlierMatchesSize; ++i)
    {
        FeatureMatch *match = outlierMatches[i];
        ImageFeaturePrediction *prediction = outlierMatchFeaturePrediction[i];

        Matd dist = (Matd(2, 1) << match->imagePos[0] - prediction->imagePos[0],
                                   match->imagePos[1] - prediction->imagePos[1]);

        // Check that this is a good match.
        // http://es.wikipedia.org/wiki/Propagaci%C3%B3n_de_errores
        // The error is propagated from the function that computes the prediction to the function
        // that computes the distance between the prediction and the match,
        // so the squared Mahalanobis distance of that error must stay below ransacChi2Threshold.
        // In other words, the match is kept only when the confidence in that distance is very high,
        // as measured against the covariance matrix of the prediction.
        if ( Matd( dist.t() * prediction->covarianceMatrix.inv() * dist)[0][0] < ekfParams->ransacChi2Threshold )
        {
            rescuedMatchesIndexes.push_back(i);
        }
    }

    // Collect the results for the rescued indexes.
    rescuedMatches.clear();
    rescuedPredictions.clear();
    rescuedJacobians.clear();

    // Reserve space so that each push_back is O(1)
    rescuedMatches.reserve(rescuedMatchesIndexes.size());
    rescuedPredictions.reserve(rescuedMatchesIndexes.size());
    rescuedJacobians.reserve(rescuedMatchesIndexes.size());

    for (uint i = 0; i < rescuedMatchesIndexes.size(); ++i)
    {
        int index = rescuedMatchesIndexes[i];

        rescuedMatches.push_back(outlierMatches[index]);
        rescuedPredictions.push_back(outlierMatchFeaturePrediction[index]);
        rescuedJacobians.push_back(outlierMatchFeaturePredictionJacobians[index]);
    }

}
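// --------------------------------------------------------------------------------------------------------------------
// A minimal, self-contained sketch of the chi-square gate used above, written with OpenCV's cv::Matx types only.
// The helper name and the 5.99 default (the 95% chi-square quantile for 2 degrees of freedom) are illustrative
// assumptions, not part of this codebase; the real code reads the threshold from ekfParams->ransacChi2Threshold.
// --------------------------------------------------------------------------------------------------------------------
#include <opencv2/opencv.hpp>

bool isMatchInsideGate(const cv::Vec2d &measured, const cv::Vec2d &predicted,
                       const cv::Matx22d &covariance, double chi2Threshold = 5.99)
{
    const cv::Vec2d innovation = measured - predicted;

    // Squared Mahalanobis distance of the innovation under the prediction covariance
    const double d2 = (innovation.t() * covariance.inv() * innovation)(0, 0);

    return d2 < chi2Threshold;
}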
// ------------------------------------------------------------------------------------------------------------
// Groups a set of features and predictions by zone, and fills in the zoneInfo for each zone.
void groupImageFeaturesAndPredictionsByZone( const VectorImageFeaturePrediction &featuresPrediction,
                                             std::vector<cv::KeyPoint> &imageKeypoints,
                                             cv::Mat &keypointsDescriptors,
                                             int zoneWidth, int zoneHeight,
                                             int totalWidth, int totalHeight,
                                             int zonesCount, ZoneInfo **zonesInfo )
{
    size_t imageKeypointsSize = imageKeypoints.size();
    for (uint i = 0; i < imageKeypointsSize; ++i)
    {
        cv::KeyPoint &currKeypoint = imageKeypoints[i];

        double imagePos[2] = {currKeypoint.pt.x, currKeypoint.pt.y};
        ImageFeatureMeasurement *imageFeature = new ImageFeatureMeasurement(imagePos, keypointsDescriptors.row(i));

        int currFeatureZoneId = getPointZone(imageFeature->imagePos, zoneWidth, zoneHeight, totalWidth, totalHeight);

        ZoneInfo *currZoneInfo = zonesInfo[currFeatureZoneId];
        currZoneInfo->candidateImageFeatureMeasurement.push_back(imageFeature);
        currZoneInfo->candidateImageFeatureMeasurementLeft++;
    }

    size_t featuresPredictionSize = featuresPrediction.size();
    for (uint i = 0; i < featuresPredictionSize; ++i)
    {
        int currPredZoneId = getPointZone(featuresPrediction[i]->imagePos, zoneWidth, zoneHeight, totalWidth, totalHeight);

        ZoneInfo *currZoneInfo = zonesInfo[currPredZoneId];
        currZoneInfo->featuresPrediction.push_back(featuresPrediction[i]);
        currZoneInfo->predictionsPlusFeaturesCount++;
    }
}
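// --------------------------------------------------------------------------------------------------------------------
// getPointZone is called above but not shown in this excerpt. A plausible row-major sketch, consistent with how
// zoneWidth and zoneHeight are computed in detectNewImageFeatures below; the helper name and the clamping policy
// are assumptions.
// --------------------------------------------------------------------------------------------------------------------
#include <algorithm>

int getPointZoneSketch(const double imagePos[2], int zoneWidth, int zoneHeight,
                       int totalWidth, int totalHeight)
{
    int zonesInARow = totalWidth / zoneWidth;
    int zonesInAColumn = totalHeight / zoneHeight;

    // Clamp so that points on the right/bottom image border fall in the last zone
    int zoneX = std::min(static_cast<int>(imagePos[0]) / zoneWidth, zonesInARow - 1);
    int zoneY = std::min(static_cast<int>(imagePos[1]) / zoneHeight, zonesInAColumn - 1);

    return zoneY * zonesInARow + zoneX;
}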
void buildImageMask( const VectorImageFeaturePrediction &inlierPredictions,
                     cv::Mat &imageMask )
{
    // Mask out the zones around each prediction
    size_t inlierPredictionsSize = inlierPredictions.size();

    cv::Scalar colorBlack(0, 0, 0);

    // For each prediction, draw its uncertainty ellipse (filled) into the mask.
    // This prevents adding new features inside the search
    // ellipses of previously tracked features.
    for (size_t i = 0; i < inlierPredictionsSize; ++i)
    {
        ImageFeaturePrediction *inlierPrediction = inlierPredictions[i];

        drawUncertaintyEllipse2D( imageMask,
                                  cv::Point2d(inlierPrediction->imagePos[0], inlierPrediction->imagePos[1]),
                                  inlierPrediction->covarianceMatrix,
                                  2 * (imageMask.rows + imageMask.cols),
                                  colorBlack,
                                  true );
    }
}
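// --------------------------------------------------------------------------------------------------------------------
// drawUncertaintyEllipse2D is used throughout this excerpt but not defined in it. A sketch of the usual construction,
// assuming a symmetric 2x2 covariance stored as CV_64F: eigen-decompose it, scale the semi-axes by a chi-square
// quantile (5.99 covers ~95% for 2 DOF; an assumption here), and rasterize with cv::ellipse.
// --------------------------------------------------------------------------------------------------------------------
#include <cmath>
#include <opencv2/opencv.hpp>

void drawCovarianceEllipseSketch(cv::Mat &image, const cv::Point2d &center,
                                 const cv::Mat &covariance2x2,
                                 const cv::Scalar &color, bool filled)
{
    cv::Mat eigenvalues, eigenvectors;
    cv::eigen(covariance2x2, eigenvalues, eigenvectors);

    // Semi-axes: sqrt(chi2 * lambda), with eigenvalues returned in descending order
    const double chi2 = 5.99;
    cv::Size axes(cvRound(std::sqrt(chi2 * eigenvalues.at<double>(0))),
                  cvRound(std::sqrt(chi2 * eigenvalues.at<double>(1))));

    // Orientation of the major axis; cv::ellipse expects degrees
    double angle = std::atan2(eigenvectors.at<double>(0, 1),
                              eigenvectors.at<double>(0, 0)) * 180.0 / CV_PI;

    cv::ellipse(image, cv::Point(cvRound(center.x), cvRound(center.y)),
                axes, angle, 0.0, 360.0, color, filled ? -1 : 1);
}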
Example #4
// --------------------------------------------------------------------------------------------------------------------
// Returns the indexes (positions in the matches array) of the matches that fall below a threshold.
// These indexes are returned to exploit the fact that the matches, the predictions and the Jacobians
// are stored in arrays with the same order.
// --------------------------------------------------------------------------------------------------------------------
void matchesBelowAThreshold(const VectorFeatureMatch &matches,
                            const VectorImageFeaturePrediction &predictedImageFeatures,
                            double threshold,
                            std::vector<int> &supportIndexesInMatchesVector)
{
    // For each predicted feature, look up its match and compute the distance
    for (uint i = 0; i < predictedImageFeatures.size(); ++i)
    {
        ImageFeaturePrediction *prediction = predictedImageFeatures[i];

        bool wasFound = false;
        int matchIndex = 0;
        while (!wasFound && matchIndex < static_cast<int>(matches.size()))
        {
            FeatureMatch *match = matches[matchIndex];

            // Compute the distance and count the feature if it is a good one.
            if (match->featureIndex == prediction->featureIndex)
            {
                double xDist = match->imagePos[0] - prediction->imagePos[0];
                double yDist = match->imagePos[1] - prediction->imagePos[1];
                double dist = sqrt(xDist*xDist + yDist*yDist);

                if (dist < threshold)
                {
                    // If it is a good match, store its index in the matches array, so that the associated
                    // prediction and Jacobians (stored in the same order as the matches array) can be retrieved later.
                    supportIndexesInMatchesVector.push_back(matchIndex);
                }

                wasFound = true;
            }

            matchIndex++;
        }
    }
}
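// --------------------------------------------------------------------------------------------------------------------
// The scan above is O(P*M) because each prediction walks the matches vector. A sketch of the same gating with a
// one-time index from featureIndex to position in `matches` (std::map here; std::unordered_map under C++11).
// It assumes featureIndex values are unique within `matches`; the helper name is an assumption.
// --------------------------------------------------------------------------------------------------------------------
#include <cmath>
#include <map>

void matchesBelowAThresholdIndexed(const VectorFeatureMatch &matches,
                                   const VectorImageFeaturePrediction &predictedImageFeatures,
                                   double threshold,
                                   std::vector<int> &supportIndexesInMatchesVector)
{
    // Build the index once: featureIndex -> position in matches
    std::map<int, int> matchIndexByFeature;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        matchIndexByFeature[matches[i]->featureIndex] = static_cast<int>(i);
    }

    for (size_t i = 0; i < predictedImageFeatures.size(); ++i)
    {
        ImageFeaturePrediction *prediction = predictedImageFeatures[i];

        std::map<int, int>::const_iterator it = matchIndexByFeature.find(prediction->featureIndex);
        if (it == matchIndexByFeature.end())
        {
            continue;
        }

        FeatureMatch *match = matches[it->second];
        double xDist = match->imagePos[0] - prediction->imagePos[0];
        double yDist = match->imagePos[1] - prediction->imagePos[1];

        // Same Euclidean gate as matchesBelowAThreshold
        if (std::sqrt(xDist * xDist + yDist * yDist) < threshold)
        {
            supportIndexesInMatchesVector.push_back(it->second);
        }
    }
}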
Example #5
void updateMapFeatures(const VectorImageFeaturePrediction &predictedDistortedFeatures, const VectorFeatureMatch& inlierMatches, State &state)
{
    size_t inlierMatchesSize = inlierMatches.size();
    size_t predictedDistortedFeaturesSize = predictedDistortedFeatures.size();
    
    for (size_t i = 0; i < predictedDistortedFeaturesSize; ++i)
    {
        int featureIndex = predictedDistortedFeatures[i]->featureIndex;
        MapFeature *currMapFeature = state.mapFeatures[featureIndex];
        currMapFeature->timesPredicted++;
    }
    
    for (size_t i = 0; i < inlierMatchesSize; ++i)
    {
        int featureIndex = inlierMatches[i]->featureIndex;
        MapFeature *currMapFeature = state.mapFeatures[featureIndex];
        currMapFeature->timesMatched++;
    }
#if defined(DEBUG_SHOW_IMAGES)
    // Assign the correct values for the image position
    // where each feature was last seen
    cleanFeaturesLastImagePos(state.mapFeatures);
    updateFeaturesLastImagePos(state.mapFeatures, inlierMatches);
#endif
    
    // Update the descriptors of the inliers
    for (size_t i = 0; i < inlierMatchesSize; ++i)
    {
        FeatureMatch *currFeatureMatch = inlierMatches[i];
        MapFeature *currMapFeature = state.mapFeatures[currFeatureMatch->featureIndex];
        
        assert(currMapFeature->descriptor.cols == currFeatureMatch->imagePosDescriptor.cols);
        
        currFeatureMatch->imagePosDescriptor.copyTo(currMapFeature->descriptor);
    }    
}
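// --------------------------------------------------------------------------------------------------------------------
// The timesPredicted/timesMatched counters updated above are typically consumed by a map-maintenance step that drops
// unreliable features. A hedged sketch of such a test; the minimum-support count of 10, the 0.5 ratio and the helper
// name are assumptions, not part of the excerpted code.
// --------------------------------------------------------------------------------------------------------------------
bool shouldDeleteMapFeatureSketch(const MapFeature *feature)
{
    // Wait until the feature has been predicted enough times to judge it
    if (feature->timesPredicted < 10)
    {
        return false;
    }

    double matchRatio = static_cast<double>(feature->timesMatched) /
                        static_cast<double>(feature->timesPredicted);

    // Features that are predicted often but rarely matched are candidates for deletion
    return matchRatio < 0.5;
}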
Example #6
// --------------------------------------------------------------------------------------------------------------------
// Runs the RANSAC loop, obtaining the matches that are considered inliers.
// This information is returned so that, outside this function, outliers can be rescued and a better estimate computed.
// The inlier predictions and Jacobians are returned alongside the inlier matches, preserving the positional
// correspondence between the three arrays that is needed to perform the update.
// --------------------------------------------------------------------------------------------------------------------
void ransac( const State &state, Matd &covariance,
             const VectorImageFeaturePrediction &predictedMatchedFeatures,
             const VectorMatd &predictedMatchedJacobians,
             const VectorFeatureMatch &matches,
             VectorFeatureMatch &inlierMatches,
             VectorImageFeaturePrediction &inlierPredictions,
             VectorMatd &inlierJacobians,
             VectorFeatureMatch &outlierMatches )
{
    size_t matchesSize = matches.size();
    if (matchesSize == 0)
    {
        return;
    }

    uint numberOfHipotesis = 1000;

    ExtendedKalmanFilterParameters *ekfParams = ConfigurationManager::getInstance().ekfParams;

    // Threshold used to search for matches: 2 * sigma_pixels
    double threshold = ekfParams->ransacThresholdPredictDistance;

    std::vector<int> inlierIndexesInMatchesVector;

    for (uint i = 0; i < numberOfHipotesis && i < matchesSize; ++i)
    {
        // Select a random match
        FeatureMatch *match = NULL;
        selectRandomMatch(matches, i, match);

        // Take the prediction corresponding to the match
        ImageFeaturePrediction *predictedMatchedFeature = predictedMatchedFeatures[i];

        // Build a vector holding the single predicted feature.
        VectorImageFeaturePrediction prediction;
        prediction.push_back(predictedMatchedFeature);

        VectorFeatureMatch matching;
        matching.push_back(match);

        // Fetch the Jacobian associated with the predicted feature
        VectorMatd jacobians;
        jacobians.push_back(predictedMatchedJacobians[i]);

        // Perform a partial update (state only)
        // on a copy of the current state
        State temporalState(state);

        updateOnlyState(prediction, matching, jacobians, temporalState, covariance);

        // Predict ALL the measurements with the updated state
        VectorImageFeaturePrediction predictedImageFeatures;
        VectorMapFeature unseenFeatures;
        std::vector<int> mapFeatureIndexes;
        // mapFeatureIndexes is passed empty to signal that predictions are wanted for all features.
        predictMeasurementState(temporalState, temporalState.mapFeatures, mapFeatureIndexes, predictedImageFeatures, unseenFeatures);

        // Find the matches below a threshold
        std::vector<int> supportIndexesInMatchesVector;

        matchesBelowAThreshold(matches, predictedImageFeatures, threshold, supportIndexesInMatchesVector);

        // If more supporting matches were found than the current best, update the consensus set.
        if (supportIndexesInMatchesVector.size() > inlierIndexesInMatchesVector.size())
        {
#ifdef DEBUG_SHOW_RANSAC_INFO
            std::string windowName = "Inliers in the RANSAC loop, prediction based on a single match";
            showRansacInliers(matches, predictedImageFeatures, supportIndexesInMatchesVector, match, windowName);
#endif
            // Update the inlier set
            inlierIndexesInMatchesVector = supportIndexesInMatchesVector;

            // Update the required number of iterations.
            // e = 1 - w, where w is the inlier ratio
            double e = 1.0L - (double)inlierIndexesInMatchesVector.size()/(double)matches.size();

            numberOfHipotesis = static_cast<int>( log(1.0L - ekfParams->ransacAllInliersProbability)/log(1.0L - (1.0L - e)) );
        }
        
        // Free the temporary memory
        size_t predictedImageFeaturesSize = predictedImageFeatures.size();
        for (uint j = 0; j < predictedImageFeaturesSize; ++j)
        {
            delete predictedImageFeatures[j];
        }
    }

    // Return the matches, predictions and Jacobians for the inlier indexes (into the matches vector)
    inlierMatches.clear();
    inlierPredictions.clear();
    inlierJacobians.clear();
    outlierMatches.clear();

    // Reserve space so that each push_back is O(1)
    inlierMatches.reserve(inlierIndexesInMatchesVector.size());
    inlierPredictions.reserve(inlierIndexesInMatchesVector.size());
    inlierJacobians.reserve(inlierIndexesInMatchesVector.size());
    outlierMatches.reserve(matches.size() - inlierIndexesInMatchesVector.size());

    // Build a mask that flags the positions in matches holding inliers
    std::vector<bool> inlierMatchesMask(matches.size(), false);

    // FIXME: This could be done without the mask, using the fact that
    // the inliers keep the relative order they have in the "matches" vector
    for (uint i = 0; i < inlierIndexesInMatchesVector.size(); ++i)
    {
        int index = inlierIndexesInMatchesVector[i];

        inlierMatchesMask[index] = true;
    }

    // Use the mask to separate the inlier matches from the outliers
    for (uint i = 0; i < inlierMatchesMask.size(); ++i)
    {
        bool is_inlier = inlierMatchesMask[i];

        if (is_inlier)
        {
            inlierMatches.push_back(matches[i]);
            inlierPredictions.push_back(predictedMatchedFeatures[i]);
            inlierJacobians.push_back(predictedMatchedJacobians[i]);
        }
        else
        {
            outlierMatches.push_back(matches[i]);
        }
    }

#ifdef DEBUG
    std::cout << "cantidad de matches: " << matches.size() << std::endl;
    std::cout << "cantidad de inliers: " << inlierMatches.size() << std::endl;
    std::cout << "cantidad de ouliers: " << outlierMatches.size() << std::endl;
#endif
}
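// --------------------------------------------------------------------------------------------------------------------
// The adaptive update inside the loop above is the standard RANSAC bound N = log(1 - p) / log(1 - w^s), specialized
// to 1-point samples (s = 1), where w is the inlier ratio and p = ransacAllInliersProbability. A generic sketch of
// that bound for reference; the helper name and the degenerate-case handling are assumptions.
// --------------------------------------------------------------------------------------------------------------------
#include <cmath>
#include <limits>

unsigned int ransacIterationBound(double inlierRatio, double confidence, int sampleSize)
{
    // Probability that a random sample of sampleSize matches contains at least one outlier
    double failureProbPerDraw = 1.0 - std::pow(inlierRatio, sampleSize);

    if (failureProbPerDraw <= 0.0)
    {
        return 1; // every draw is all-inlier
    }
    if (failureProbPerDraw >= 1.0)
    {
        return std::numeric_limits<unsigned int>::max(); // no inliers observed yet
    }

    return static_cast<unsigned int>(std::ceil(std::log(1.0 - confidence) /
                                               std::log(failureProbPerDraw)));
}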
void detectNewImageFeatures( const cv::Mat &image,
                             const VectorImageFeaturePrediction &featuresPrediction,
                             uint newImageFeaturesMaxSize,
                             VectorImageFeatureMeasurement &newImageFeatures )
{
    newImageFeatures.clear();

    // Compute the size of the image zones according to the configured parameters
    ConfigurationManager& configManager = ConfigurationManager::getInstance();
    
    //int zonesInARow = exp2f(configManager.ekfParams->detectNewFeaturesImageAreasDivideTimes);
    int zonesInARow = 1;

    int zoneWidth = image.cols / zonesInARow;
    int zoneHeight = image.rows / zonesInARow;

    // Build the mask so that features are searched only in sparsely populated zones
    cv::Mat imageMask( cv::Mat::ones(image.rows, image.cols, CV_8UC1) * 255 );
    buildImageMask( featuresPrediction, imageMask );

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // Special case: the number of detected features does not exceed the number requested
    size_t imageKeypointsSize = imageKeypoints.size();

#ifdef DEBUG
    std::cout << "Cantidad de features detectados al agregar nuevos: " << imageKeypointsSize << std::endl;
#endif

    if (imageKeypointsSize <= newImageFeaturesMaxSize)
    {
        double imagePos[2];
        for (size_t i = 0; i < imageKeypointsSize; ++i)
        {
            cv::KeyPoint &currKeypoint = imageKeypoints[i];

            imagePos[0] = currKeypoint.pt.x;
            imagePos[1] = currKeypoint.pt.y;

            newImageFeatures.push_back( new ImageFeatureMeasurement( imagePos,
                                                                     descriptors.row(i) ) );
        }
    }
    else
    {
        // Search for new features, trying to keep them
        // as well distributed across the image as possible
        searchFeaturesByZone( featuresPrediction, imageKeypoints, descriptors,
                              zonesInARow, zoneWidth, zoneHeight,
                              imageMask,
                              newImageFeaturesMaxSize, newImageFeatures );
    }

#ifdef DEBUG_SHOW_NEW_FEATURES
    cv::Mat imageCopy;
    image.copyTo(imageCopy);
    for (int i = 1; i < zonesInARow; ++i)
    {
        cv::line(imageCopy, cv::Point(i * zoneWidth, 0), cv::Point(i * zoneWidth, imageCopy.rows), cv::Scalar(0, 255, 0));
        cv::line(imageCopy, cv::Point(0, i * zoneHeight), cv::Point(imageCopy.cols, i * zoneHeight), cv::Scalar(0, 255, 0));
    }

    size_t featuresPredictionSize = featuresPrediction.size();
    for (size_t i = 0; i < featuresPredictionSize; ++i)
    {
        ImageFeaturePrediction *currFeaturePrediction = featuresPrediction[i];

        drawUncertaintyEllipse2D( imageCopy,
                                  cv::Point2f(currFeaturePrediction->imagePos[0], currFeaturePrediction->imagePos[1]),
                                  currFeaturePrediction->covarianceMatrix,
                                  2 * (image.cols + image.rows),
                                  cv::Scalar(0, 255, 0),
                                  false );
    }

    cv::namedWindow("Busqueda de nuevos features: mascara");
    cv::imshow("Busqueda de nuevos features: mascara", imageMask);
    cv::waitKey(0);

    for (size_t i = 0; i < newImageFeatures.size(); ++i)
    {
        drawPoint(imageCopy, newImageFeatures[i]->imagePos, cv::Scalar(0, 255, 255));
    }

    cv::namedWindow("Busqueda de nuevos features: imagen con nuevos features");
    cv::imshow("Busqueda de nuevos features: imagen con nuevos features", imageCopy);
    cv::waitKey(0);

    // Destroy all the windows that were created
    cv::destroyWindow("New feature search: mask");
    cv::destroyWindow("New feature search: image with new features");
#endif
}
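// --------------------------------------------------------------------------------------------------------------------
// searchFeaturesByZone is called above but not shown in this excerpt. Its intent, per the surrounding comments, is to
// spread the new features across zones. A simplified greedy sketch over the ZoneInfo grouping built by
// groupImageFeaturesAndPredictionsByZone; the selection policy and the helper name are assumptions.
// --------------------------------------------------------------------------------------------------------------------
void pickFeaturesByZoneSketch(ZoneInfo **zonesInfo, int zonesCount,
                              uint newImageFeaturesMaxSize,
                              VectorImageFeatureMeasurement &newImageFeatures)
{
    while (newImageFeatures.size() < newImageFeaturesMaxSize)
    {
        // Pick the least populated zone that still has unused candidates
        int bestZone = -1;
        for (int z = 0; z < zonesCount; ++z)
        {
            if (zonesInfo[z]->candidateImageFeatureMeasurementLeft > 0 &&
                (bestZone < 0 || zonesInfo[z]->predictionsPlusFeaturesCount <
                                 zonesInfo[bestZone]->predictionsPlusFeaturesCount))
            {
                bestZone = z;
            }
        }

        if (bestZone < 0)
        {
            break; // no candidates left in any zone
        }

        // Consume the next candidate of the chosen zone and count it as populated
        ZoneInfo *zone = zonesInfo[bestZone];
        int nextCandidate = static_cast<int>(zone->candidateImageFeatureMeasurement.size()) -
                            zone->candidateImageFeatureMeasurementLeft;

        newImageFeatures.push_back(zone->candidateImageFeatureMeasurement[nextCandidate]);
        zone->candidateImageFeatureMeasurementLeft--;
        zone->predictionsPlusFeaturesCount++;
    }
}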
Example #8
void matchPredictedFeatures( const cv::Mat& image,
                             const VectorMapFeature& features,
                             const VectorImageFeaturePrediction& vectorfeaturePrediction,
                             VectorFeatureMatch &matches )
{
    ConfigurationManager &configManager = ConfigurationManager::getInstance();

    cv::Mat imageMask( cv::Mat::zeros(image.rows, image.cols, CV_8UC1) );

    size_t vectorfeaturePredictionSize = vectorfeaturePrediction.size();

    // Build the mask, to avoid searching for features outside the ellipses of the predicted features
    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];
        drawUncertaintyEllipse2D( imageMask,
                                  cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
                                  featurePrediction->covarianceMatrix,
                                  2.0L*MAX(configManager.cameraCalibration->pixelsX, configManager.cameraCalibration->pixelsY),
                                  ellipseColorWhite,
                                  true );
    }

    // Detect features
    std::vector<cv::KeyPoint> imageKeypoints;
    configManager.featureDetector->detect(image, imageKeypoints, imageMask);

    // Extract descriptors
    cv::Mat descriptors;
    configManager.descriptorExtractor->compute(image, imageKeypoints, descriptors);

    // IMPORTANT: the size of imageKeypoints MUST be taken after extracting descriptors,
    // since that method may remove keypoints for which a descriptor
    // could not be computed.
    size_t imageKeypointsSize = imageKeypoints.size();

    for (size_t i = 0; i < vectorfeaturePredictionSize; ++i)
    {
        cv::Mat mask(cv::Mat::zeros(1, descriptors.rows, CV_8UC1));

        ImageFeaturePrediction *featurePrediction = vectorfeaturePrediction[i];

        MapFeature *currMapFeature = features[featurePrediction->featureIndex];

        cv::Size2f axesSize(0.0f, 0.0f);
        double angle = 0.0L;
        matrix2x2ToUncertaintyEllipse2D(featurePrediction->covarianceMatrix, axesSize, angle);

        for (int j = 0; j < static_cast<int>(imageKeypointsSize); ++j)
        {
            // If the keypoint falls inside the ellipse, add it as a candidate
            if ( pointIsInsideEllipse( imageKeypoints[j].pt,
                                       cv::Point2d(featurePrediction->imagePos[0], featurePrediction->imagePos[1]),
                                       axesSize,
                                       angle ) )
            {
                mask.at<uchar>(0, j) = true;
            }
        }

        std::vector<cv::DMatch> currFeatureMatch;
        matchICDescriptors( currMapFeature->descriptor,
                            descriptors,
                            currFeatureMatch,
                            mask );

        // If a match was found, add it to the result vector
        if (currFeatureMatch.size() > 0)
        {
            cv::KeyPoint& matchedKeypoint = imageKeypoints[currFeatureMatch[0].trainIdx];

            // Add a match to the result
            FeatureMatch *newMatch = new FeatureMatch;
            newMatch->featureIndex = featurePrediction->featureIndex;
            newMatch->imagePos[0] = static_cast<double>(matchedKeypoint.pt.x);
            newMatch->imagePos[1] = static_cast<double>(matchedKeypoint.pt.y);
            newMatch->distance = currFeatureMatch[0].distance;

            descriptors.row(currFeatureMatch[0].trainIdx).copyTo(newMatch->imagePosDescriptor);

            matches.push_back(newMatch);
        }
    }
}
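// --------------------------------------------------------------------------------------------------------------------
// matchICDescriptors is called above but not defined in this excerpt. A plausible sketch using a brute-force matcher
// that honors the 1 x N candidate mask built per prediction; NORM_L2 is an assumption and should follow the descriptor
// type actually configured (e.g. NORM_HAMMING for binary descriptors).
// --------------------------------------------------------------------------------------------------------------------
#include <opencv2/opencv.hpp>

void matchDescriptorsMaskedSketch(const cv::Mat &queryDescriptor,
                                  const cv::Mat &trainDescriptors,
                                  std::vector<cv::DMatch> &matches,
                                  const cv::Mat &mask)
{
    // crossCheck is disabled because the mask already restricts the candidates
    cv::BFMatcher matcher(cv::NORM_L2, false);
    matcher.match(queryDescriptor, trainDescriptors, matches, mask);
}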