const std::shared_ptr< std::vector< CLandmark* > > CTrackerStereoMotionModel::_getNewLandmarks( const UIDFrame& p_uFrame,
                                                                                     cv::Mat& p_matDisplay,
                                                                                     const cv::Mat& p_matImageLEFT,
                                                                                     const cv::Mat& p_matImageRIGHT,
                                                                                     const Eigen::Isometry3d& p_matTransformationWORLDtoLEFT,
                                                                                     const Eigen::Isometry3d& p_matTransformationLEFTtoWORLD,
                                                                                     const Eigen::Vector3d& p_vecRotation )
{
    //ds precompute extrinsics
    const MatrixProjection matProjectionWORLDtoLEFT( m_pCameraLEFT->m_matProjection*p_matTransformationWORLDtoLEFT.matrix( ) );

    //ds solution holder
    std::shared_ptr< std::vector< CLandmark* > > vecNewLandmarks( std::make_shared< std::vector< CLandmark* > >( ) );

    //ds detect new keypoints
    //const std::shared_ptr< std::vector< cv::KeyPoint > > vecKeyPoints( m_cDetector.detectKeyPointsTilewise( p_matImageLEFT, matMask ) );
    std::vector< cv::KeyPoint > vecKeyPoints;
    m_pDetector->detect( p_matImageLEFT, vecKeyPoints, m_cMatcher.getMaskActiveLandmarks( p_matTransformationWORLDtoLEFT, p_matDisplay ) );

    //ds compute descriptors for the keypoints
    CDescriptor matReferenceDescriptors;
    //m_pExtractor->compute( p_matImageLEFT, *vecKeyPoints, matReferenceDescriptors );
    m_pExtractor->compute( p_matImageLEFT, vecKeyPoints, matReferenceDescriptors );

    //ds process the keypoints and see if we can use them as landmarks
    for( uint32_t u = 0; u < vecKeyPoints.size( ); ++u )
    {
        //ds current points
        const cv::KeyPoint cKeyPointLEFT( vecKeyPoints[u] );
        const cv::Point2f ptLandmarkLEFT( cKeyPointLEFT.pt );
        const CDescriptor matDescriptorLEFT( matReferenceDescriptors.row(u) );

        try
        {
            //ds triangulate the point
            const CMatchTriangulation cMatch( m_pTriangulator->getPointTriangulatedCompactInRIGHT( p_matImageRIGHT, cKeyPointLEFT, matDescriptorLEFT ) );
            const CPoint3DCAMERA vecPointTriangulatedLEFT( cMatch.vecPointXYZCAMERA );
            const CDescriptor matDescriptorRIGHT( cMatch.matDescriptorCAMERA );

            //ds check depth
            const double dDepthMeters( vecPointTriangulatedLEFT.z( ) );

            //ds check if the point lies in front of the camera and not farther away than the defined maximum distance
            if( m_dMinimumDepthMeters < dDepthMeters && m_dMaximumDepthMeters > dDepthMeters )
            {
                //ds compute triangulated point in world frame
                const CPoint3DWORLD vecPointTriangulatedWORLD( p_matTransformationLEFTtoWORLD*vecPointTriangulatedLEFT );

                //ds landmark right
                const cv::Point2f ptLandmarkRIGHT( cMatch.ptUVCAMERA );

                //ds allocate a new landmark and add the current position
                CLandmark* pLandmark( new CLandmark( m_uAvailableLandmarkID,
                                                     matDescriptorLEFT,
                                                     cMatch.matDescriptorCAMERA,
                                                     cKeyPointLEFT.size,
                                                     vecPointTriangulatedWORLD,
                                                     m_pCameraLEFT->getNormalHomogenized( ptLandmarkLEFT ),
                                                     ptLandmarkLEFT,
                                                     ptLandmarkRIGHT,
                                                     vecPointTriangulatedLEFT,
                                                     p_matTransformationLEFTtoWORLD.translation( ),
                                                     p_vecRotation,
                                                     matProjectionWORLDtoLEFT,
                                                     p_uFrame ) );

                //ds log creation
                CLogger::CLogLandmarkCreation::addEntry( p_uFrame, pLandmark, dDepthMeters, ptLandmarkLEFT, ptLandmarkRIGHT );

                //ds add to newly detected
                vecNewLandmarks->push_back( pLandmark );

                //ds next landmark id
                ++m_uAvailableLandmarkID;

                //ds draw detected point
                cv::line( p_matDisplay, ptLandmarkLEFT, cv::Point2f( ptLandmarkLEFT.x+m_pCameraSTEREO->m_uPixelWidth, ptLandmarkLEFT.y ), CColorCodeBGR( 175, 175, 175 ) );
                cv::circle( p_matDisplay, ptLandmarkLEFT, 2, CColorCodeBGR( 0, 255, 0 ), -1 );
                //cv::circle( p_matDisplay, ptLandmarkLEFT, cLandmark->dKeyPointSize, CColorCodeBGR( 255, 0, 0 ), 1 );
                cv::putText( p_matDisplay, std::to_string( pLandmark->uID ) , cv::Point2d( ptLandmarkLEFT.x+pLandmark->dKeyPointSize, ptLandmarkLEFT.y+pLandmark->dKeyPointSize ), cv::FONT_HERSHEY_PLAIN, 0.5, CColorCodeBGR( 0, 0, 255 ) );

                //ds draw reprojection of triangulation
                cv::circle( p_matDisplay, cv::Point2d( ptLandmarkRIGHT.x+m_pCameraSTEREO->m_uPixelWidth, ptLandmarkRIGHT.y ), 2, CColorCodeBGR( 255, 0, 0 ), -1 );
            }
            else
            {
                cv::circle( p_matDisplay, ptLandmarkLEFT, 2, CColorCodeBGR( 0, 0, 255 ), -1 );
                //cv::circle( p_matDisplay, ptLandmarkLEFT, cKeyPoint.size, CColorCodeBGR( 0, 0, 255 ) );

                //std::printf( "<CTrackerStereoMotionModel>(_getNewLandmarks) could not find match for keypoint (invalid depth: %f m)\n", vecPointTriangulatedLEFT(2) );
            }
        }
        catch( const CExceptionNoMatchFound& p_cException )
        {
            cv::circle( p_matDisplay, ptLandmarkLEFT, 2, CColorCodeBGR( 0, 0, 255 ), -1 );
            //cv::circle( p_matDisplay, ptLandmarkLEFT, cKeyPoint.size, CColorCodeBGR( 0, 0, 255 ) );

            //std::printf( "<CTrackerStereoMotionModel>(_getNewLandmarks) could not find match for keypoint (%s)\n", p_cException.what( ) );
        }
    }

    //std::printf( "<CTrackerStereoMotionModel>(_getNewLandmarks) added new landmarks: %lu/%lu\n", vecNewLandmarks->size( ), vecKeyPoints.size( ) );

    //ds return found landmarks
    return vecNewLandmarks;
}
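
For orientation, here is a minimal sketch of how this routine might be driven from the tracking loop. The call site below is hypothetical: the member names m_uFrameCount and m_vecLandmarksActive, and the helper _exampleProcessFrame itself, are assumptions for illustration and do not appear in the original class.

void CTrackerStereoMotionModel::_exampleProcessFrame( const cv::Mat& p_matImageLEFT,
                                                      const cv::Mat& p_matImageRIGHT,
                                                      cv::Mat& p_matDisplay,
                                                      const Eigen::Isometry3d& p_matTransformationWORLDtoLEFT,
                                                      const Eigen::Vector3d& p_vecRotation )
{
    // invert the pose once so both transform directions are available to the detection step
    const Eigen::Isometry3d matTransformationLEFTtoWORLD( p_matTransformationWORLDtoLEFT.inverse( ) );

    // detect and triangulate fresh landmarks for the current frame (m_uFrameCount is assumed)
    const std::shared_ptr< std::vector< CLandmark* > > vecNewLandmarks( _getNewLandmarks( m_uFrameCount,
                                                                                          p_matDisplay,
                                                                                          p_matImageLEFT,
                                                                                          p_matImageRIGHT,
                                                                                          p_matTransformationWORLDtoLEFT,
                                                                                          matTransformationLEFTtoWORLD,
                                                                                          p_vecRotation ) );

    // append the new landmarks to an assumed container that tracks them across frames
    m_vecLandmarksActive.insert( m_vecLandmarksActive.end( ), vecNewLandmarks->begin( ), vecNewLandmarks->end( ) );
}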
Example #2
void computeDepth( const cv::Mat& p_matImageLEFT, const cv::Mat& p_matImageRIGHT )
{
    //ds preprocessed images
    cv::Mat matPreprocessedLEFT;
    cv::Mat matPreprocessedRIGHT;

    //ds preprocess images
    cv::equalizeHist( p_matImageLEFT, matPreprocessedLEFT );
    cv::equalizeHist( p_matImageRIGHT, matPreprocessedRIGHT );
    g_pCameraSTEREO->undistortAndrectify( matPreprocessedLEFT, matPreprocessedRIGHT );

    //ds get images into triple channel mats (display only)
    cv::Mat matDisplayLEFT;
    cv::Mat matDisplayRIGHT;

    //ds get images to triple channel for colored display
    cv::cvtColor( matPreprocessedLEFT, matDisplayLEFT, cv::COLOR_GRAY2BGR );
    cv::cvtColor( matPreprocessedRIGHT, matDisplayRIGHT, cv::COLOR_GRAY2BGR );

    //ds detect features
    std::vector< cv::KeyPoint > vecKeyPoints;
    g_pDetector->detect( matPreprocessedLEFT, vecKeyPoints );

    //ds compute descriptors for the keypoints
    CDescriptor matReferenceDescriptors;
    g_pExtractor->compute( matPreprocessedLEFT, vecKeyPoints, matReferenceDescriptors );

    //ds count active points
    uint64_t uActivePoints( 0 );

    //ds errors
    double dAverageErrorSVDLS( 0.0 );
    double dAverageErrorQRLS( 0.0 );
    double dAverageErrorEDS( 0.0 );
    double dAverageErrorSVDDLT( 0.0 );
    uint64_t uPointsSVDDLS( 0 );
    uint64_t uPointsQRLS( 0 );
    uint64_t uPointsEDS( 0 );
    uint64_t uPointsSVDDLT( 0 );

    //ds process the keypoints and see if we can use them as landmarks
    for( uint32_t u = 0; u < vecKeyPoints.size( ); ++u )
    {
        //ds current points
        const cv::KeyPoint cKeyPoint( vecKeyPoints[u] );
        const cv::Point2f ptLandmarkLEFT( cKeyPoint.pt );
        const CDescriptor& matReferenceDescriptorLEFT( matReferenceDescriptors.row(u) );

        //ds draw detected point
        cv::circle( matDisplayLEFT, ptLandmarkLEFT, 2, CColorCodeBGR( 255, 0, 0 ), -1 );

        //ds triangulations - set on success else null
        const CPoint3DCAMERA* vecPointTriangulatedSTEREOSVDLS( 0 );
        const CPoint3DCAMERA* vecPointTriangulatedSTEREOQRLS( 0 );
        const CPoint3DCAMERA* vecPointTriangulatedSTEREOEDS( 0 );
        const CPoint3DCAMERA* vecPointTriangulatedSTEREOSVDDLT( 0 );

        try
        {
            //ds triangulate the point
            vecPointTriangulatedSTEREOSVDLS = new CPoint3DCAMERA( g_pTriangulator->getPointTriangulatedLimitedSVDLS( matDisplayRIGHT, matPreprocessedRIGHT, cKeyPoint, matReferenceDescriptorLEFT ) );
        }
        catch( const CExceptionNoMatchFound& p_cException )
        {
            //std::cout << "exception: " << p_cException.what( ) << std::endl;
        }

        /*try
        {
            vecPointTriangulatedSTEREOQRLS  = new CPoint3DInCameraFrame( g_pTriangulator->getPointTriangulatedLimitedQRLS( matPreprocessedRIGHT, cKeyPoint, matReferenceDescriptorLEFT ) );
        }
        catch( const CExceptionNoMatchFound& p_cException )
        {

        }

        try
        {
            vecPointTriangulatedSTEREOSVDDLT = new CPoint3DInCameraFrame( g_pTriangulator->getPointTriangulatedLimitedSVDDLT( matPreprocessedRIGHT, cKeyPoint, matReferenceDescriptorLEFT ) );
        }
        catch( const CExceptionNoMatchFound& p_cException )
        {

        }*/

        /*try
        {
            std::vector< cv::KeyPoint > vecKeyPointsEDS;
            std::vector< CPoint3DInCameraFrame > vecTriangulatedPoints;

            //ds normalized coords
            const CPoint2DInCameraFrame vecLandmarkLEFTNormalized( g_pCameraSTEREO->m_pCameraLEFT->getNormalized( ptLandmarkLEFT ) );
            //const CPoint2DInCameraFrame vecLandmarkLEFTNormalized( ptLandmarkLEFT.x/g_pCameraSTEREO->m_pCameraLEFT->m_dFx, ptLandmarkLEFT.y/g_pCameraSTEREO->m_pCameraLEFT->m_dFy );*/

            /*ds calibrate scaling factor for depth of one meter (EPIPOLAR CONSTRAINT)
            double dErrorPixel( 1000.0 );
            double dVerticalFactor( 1.0 );

            #pragma omp parallel for shared( dErrorPixel, dVerticalFactor )
            for( uint32_t u = 1; u < 20; ++u )
            {
                const double dDepthSampling( u/static_cast< double >( 10.0 ) );

                for( int32_t i = 1; i < 15; ++i )
                {
                    //ds sample current solution
                    const double dVerticalFactorCurrent = i/static_cast< double >( 10.0 );
                    const CPoint3DInCameraFrame vecPointUncalibratedEDSLEFT( dVerticalFactorCurrent*dDepthSampling*vecLandmarkLEFTNormalized(0), dVerticalFactorCurrent*dDepthSampling*vecLandmarkLEFTNormalized(1), dDepthSampling );
                    //const CPoint3DInCameraFrame vecPointUncalibratedEDSRIGHT( g_pCameraSTEREO->m_matTransformLEFTtoRIGHT*vecPointUncalibratedEDSLEFT );
                    const double dErrorPixelCurrent( std::fabs( g_pCameraSTEREO->m_pCameraRIGHT->getProjection( vecPointUncalibratedEDSLEFT ).y-ptLandmarkLEFT.y ) );

                    if( dErrorPixel > dErrorPixelCurrent )
                    {
                        dVerticalFactor = dVerticalFactorCurrent;
                        dErrorPixel     = dErrorPixelCurrent;
                    }
                }
            }

            assert( 1000.0 != dErrorPixel );

            if( 0.1 == dVerticalFactor || 1.5 == dVerticalFactor )
            {
                throw CExceptionNoMatchFound( "<CTriangulator>(getPointTriangulatedLimited) could not calibrate offset" );
            }*/

            //std::cout << "best min error: " << dErrorPixel << " (scale factor: " << dVerticalFactor << " )" << std::endl;

            //ds exponential depth sampling triangulation EDS
            //for( int32_t iExponent = -50; iExponent < 50; ++iExponent )
            /*#pragma omp parallel for
            for( uint32_t uDepthDecimeter = 1; uDepthDecimeter < 100; ++uDepthDecimeter )
            {
                //ds current depth
                const double dDepthMeter( uDepthDecimeter/static_cast< double >( 10.0 ) );

                //ds "compute triangulated point"
                const CPoint3DInCameraFrame vecPointTriangulatedEDSLEFT( vecLandmarkLEFTNormalized(0)*dDepthMeter, vecLandmarkLEFTNormalized(1)*dDepthMeter, dDepthMeter );
                //const CPoint3DInCameraFrame vecPointTriangulatedEDSRIGHT( g_pCameraSTEREO->m_matTransformLEFTtoRIGHT*vecPointTriangulatedEDSLEFT );

                //std::cout << vecPointTriangulatedEDSLEFT.transpose( ) << " - " << vecPointTriangulatedEDSRIGHT.transpose( ) << std::endl;

                //ds project the point into the right camera frame
                const cv::Point2d ptLandmarkRIGHTEDS( g_pCameraSTEREO->m_pCameraRIGHT->getProjection( vecPointTriangulatedEDSLEFT ) );

                if( g_pCameraSTEREO->m_cVisibleRange.contains( ptLandmarkRIGHTEDS ) )
                {
                    //ds TODO shift vector combination
                    #pragma omp critical
                    {
                        vecKeyPointsEDS.push_back( cv::KeyPoint( ptLandmarkRIGHTEDS, cKeyPoint.size ) );
                        vecTriangulatedPoints.push_back( vecPointTriangulatedEDSLEFT );
                    }
                    cv::circle( matDisplayRIGHT, ptLandmarkRIGHTEDS, 2, CColorCodeBGR( 200, 200, 200 ), -1 );
                }
                else
                {
                    //ds stop condition
                    if( !vecKeyPointsEDS.empty( ) )
                    {
                        //break;
                        uDepthDecimeter = 100;
                    }
                }
            }

            if( !vecKeyPointsEDS.empty( ) )
            {
                //ds compute descriptors for the keypoints
                CDescriptor matDescriptorsEDS;
                g_pExtractor->compute( matPreprocessedRIGHT, vecKeyPointsEDS, matDescriptorsEDS );

                //ds match the descriptors
                std::vector< cv::DMatch > vecMatches;
                g_pMatcher->match( matReferenceDescriptorLEFT, matDescriptorsEDS, vecMatches );

                if( vecMatches.empty( ) )
                {
                    throw CExceptionNoMatchFound( "<CTriangulator>(getPointTriangulatedLimited) no match found" );
                }

                //ds check match quality
                if( g_dMatchingDistanceCutoffTriangulation > vecMatches[0].distance )
                {
                    vecPointTriangulatedSTEREOEDS = new CPoint3DInCameraFrame( vecTriangulatedPoints[vecMatches[0].trainIdx] );
                }
                else
                {
                    throw CExceptionNoMatchFound( "<CTriangulator>(getPointTriangulatedLimited) matching distance: " + std::to_string( vecMatches[0].distance ) );
                }
            }
        }
        catch( const CExceptionNoMatchFound& p_cException )
        {

        }*/

        //ds escape if no depth has been computed
        if( 0 == vecPointTriangulatedSTEREOEDS && 0 == vecPointTriangulatedSTEREOQRLS && 0 == vecPointTriangulatedSTEREOSVDLS && 0 == vecPointTriangulatedSTEREOSVDDLT )
        {
            //ds UGLY
            continue;
        }

        std::string strDepthInfo( "" );

        if( 0 != vecPointTriangulatedSTEREOSVDLS )
        {
            //ds compute error
            const cv::Point2d ptLandmarkRIGHT( g_pCameraSTEREO->m_pCameraRIGHT->getProjection( *vecPointTriangulatedSTEREOSVDLS ) );
            const double dDisparityPixels( ptLandmarkLEFT.x-ptLandmarkRIGHT.x );
            const double dDepthMeters( ( *vecPointTriangulatedSTEREOSVDLS )(2) );
            const double dDepthErrorMeters( ( *vecPointTriangulatedSTEREOSVDLS )(2)*( *vecPointTriangulatedSTEREOSVDLS )(2)/( g_pCameraSTEREO->m_dBaselineMeters*g_pCameraSTEREO->m_pCameraLEFT->m_dFx )*dDisparityPixels );
            dAverageErrorSVDLS += dDepthErrorMeters/dDepthMeters;
            ++uPointsSVDDLS;

            char chBufferDepth[20];
            std::snprintf( chBufferDepth, 20, "|%.2f", ( *vecPointTriangulatedSTEREOSVDLS )(2) );
            strDepthInfo += chBufferDepth;

            cv::circle( matDisplayRIGHT, ptLandmarkRIGHT, 10, CColorCodeBGR( 255, 0, 255 ), 1 );
        }
        else
        {
            strDepthInfo += "|X";
        }

        /*if( 0 != vecPointTriangulatedSTEREOQRLS )
        {
            //ds compute error
            const cv::Point2d ptLandmarkRIGHT( g_pCameraSTEREO->m_pCameraRIGHT->getProjection(*vecPointTriangulatedSTEREOQRLS ) );
            const double dDisparityPixels( ptLandmarkLEFT.x-ptLandmarkRIGHT.x );
            const double dDepthMeters( ( *vecPointTriangulatedSTEREOQRLS )(2) );
            const double dDepthErrorMeters( ( *vecPointTriangulatedSTEREOQRLS )(2)*( *vecPointTriangulatedSTEREOQRLS )(2)/g_pCameraSTEREO->m_dBaselineMeters*dDisparityPixels/g_pCameraSTEREO->m_pCameraLEFT->m_dFx );
            dAverageErrorQRLS += dDepthErrorMeters/dDepthMeters;
            ++uPointsQRLS;

            char chBufferDepth[20];
            std::snprintf( chBufferDepth, 20, "|%.2f", ( *vecPointTriangulatedSTEREOQRLS )(2) );
            strDepthInfo += chBufferDepth;

            cv::circle( matDisplayRIGHT, ptLandmarkRIGHT, 12, CColorCodeBGR( 128, 0, 128 ), 1 );
        }
        else
        {
            strDepthInfo += "|X";
        }

        if( 0 != vecPointTriangulatedSTEREOSVDDLT )
        {
            //ds compute error
            const cv::Point2d ptLandmarkRIGHT( g_pCameraSTEREO->m_pCameraRIGHT->getProjection( *vecPointTriangulatedSTEREOSVDDLT ) );
            const double dDisparityPixels( ptLandmarkLEFT.x-ptLandmarkRIGHT.x );
            const double dDepthMeters( ( *vecPointTriangulatedSTEREOSVDDLT )(2) );
            const double dDepthErrorMeters( ( *vecPointTriangulatedSTEREOSVDDLT )(2)*( *vecPointTriangulatedSTEREOSVDDLT )(2)/( g_pCameraSTEREO->m_dBaselineMeters*g_pCameraSTEREO->m_pCameraLEFT->m_dFx )*dDisparityPixels );
            dAverageErrorSVDDLT += dDepthErrorMeters/dDepthMeters;
            ++uPointsSVDDLT;

            char chBufferDepth[20];
            std::snprintf( chBufferDepth, 20, "|%.2f", ( *vecPointTriangulatedSTEREOSVDDLT )(2) );
            strDepthInfo += chBufferDepth;

            cv::circle( matDisplayRIGHT, ptLandmarkRIGHT, 14, CColorCodeBGR( 255, 0, 0 ), 1 );
        }
        else
        {
            strDepthInfo += "|X";
        }

        if( 0 != vecPointTriangulatedSTEREOEDS )
        {
            //ds compute error
            const cv::Point2d ptLandmarkRIGHT( g_pCameraSTEREO->m_pCameraRIGHT->getProjection( *vecPointTriangulatedSTEREOEDS  ) );
            const double dDisparityPixels( ptLandmarkLEFT.x-ptLandmarkRIGHT.x );
            const double dDepthMeters( ( *vecPointTriangulatedSTEREOEDS )(2) );
            const double dDepthErrorMeters( dDepthMeters*dDepthMeters/( g_pCameraSTEREO->m_dBaselineMeters*g_pCameraSTEREO->m_pCameraLEFT->m_dFx )*dDisparityPixels );
            dAverageErrorEDS += dDepthErrorMeters/dDepthMeters;
            ++uPointsEDS;

            char chBufferDepth[20];
            std::snprintf( chBufferDepth, 20, "|%.2f", ( *vecPointTriangulatedSTEREOEDS )(2) );
            strDepthInfo += chBufferDepth;

            cv::circle( matDisplayRIGHT, ptLandmarkRIGHT, 16, CColorCodeBGR( 0, 255, 0 ), 1 );
        }
        else
        {
            strDepthInfo += "|X";
        }*/

        cv::putText( matDisplayLEFT, strDepthInfo, cv::Point2d( ptLandmarkLEFT.x+cKeyPoint.size, ptLandmarkLEFT.y+cKeyPoint.size ), cv::FONT_HERSHEY_PLAIN, 0.75, CColorCodeBGR( 0, 0, 255 ) );

        ++uActivePoints;

        //ds free memory
        if( 0 != vecPointTriangulatedSTEREOSVDLS ){ delete vecPointTriangulatedSTEREOSVDLS; }
        if( 0 != vecPointTriangulatedSTEREOQRLS ){ delete vecPointTriangulatedSTEREOQRLS; }
        if( 0 != vecPointTriangulatedSTEREOSVDDLT ){ delete vecPointTriangulatedSTEREOSVDDLT; }
        if( 0 != vecPointTriangulatedSTEREOEDS ){ delete vecPointTriangulatedSTEREOEDS; }
    }

    //ds compute averages
    if( 0 != uPointsSVDDLS ){ dAverageErrorSVDLS  = dAverageErrorSVDLS/uPointsSVDDLS; }
    if( 0 != uPointsQRLS ){ dAverageErrorQRLS     = dAverageErrorQRLS/uPointsQRLS; }
    if( 0 != uPointsSVDDLT ){ dAverageErrorSVDDLT = dAverageErrorSVDDLT/uPointsSVDDLT; }
    if( 0 != uPointsEDS ){ dAverageErrorEDS       = dAverageErrorEDS/uPointsEDS; }

    //ds build display mat
    cv::hconcat( matDisplayLEFT, matDisplayRIGHT, g_matDisplay );

    char chBuffer[100];
    std::snprintf( chBuffer, 100, "ACTIVE POINTS: %lu ERRORS[ SVDLS: %4.2f(%2lu) QRLS: %4.2f(%2lu) SVDDLT: %4.2f(%2lu) EDS: %4.2f(%2lu) ]", uActivePoints,
                   dAverageErrorSVDLS, uPointsSVDDLS, dAverageErrorQRLS, uPointsQRLS, dAverageErrorSVDDLT, uPointsSVDDLT, dAverageErrorEDS, uPointsEDS );
    g_matDisplay( cv::Rect( 0, 0, g_pCameraSTEREO->m_uPixelWidth, 17 ) ).setTo( CColorCodeBGR( 0, 0, 0 ) );
    cv::putText( g_matDisplay, chBuffer , cv::Point2i( 2, 12 ), cv::FONT_HERSHEY_PLAIN, 0.8, CColorCodeBGR( 0, 0, 255 ) );

    //ds display
    cv::imshow( "stereo depth calibration", g_matDisplay );
    cv::waitKey( 1 );
}
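
The per-point error statistic accumulated in computeDepth follows directly from the rectified pinhole relation z = b*fx/d, so the ratio dDepthErrorMeters/dDepthMeters evaluates to 1.0 exactly when the triangulated depth agrees with the disparity prediction. A minimal standalone sketch of that computation (the function name and parameter names are hypothetical; the expression mirrors the SVDLS branch above):

double computeRelativeDepthError( const double dDepthMeters,
                                  const double dDisparityPixels,
                                  const double dBaselineMeters,
                                  const double dFxPixels )
{
    // same expression as in the SVDLS branch above: z^2/(b*fx)*d
    const double dDepthErrorMeters = dDepthMeters*dDepthMeters/( dBaselineMeters*dFxPixels )*dDisparityPixels;

    // for an ideal rectified pair z = b*fx/d, so this ratio is 1.0; deviations
    // from 1.0 indicate disagreement between triangulation and the disparity model
    return dDepthErrorMeters/dDepthMeters;
}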