Example #1
0
// Writes the visible image points of every camera to a text file.
// One line per visible (camera, point) pair:
//   <snapshot_id> <camera_index> <point_id> <x> <y>
// Returns 0 on success, -1 if the file could not be opened.
int Environment::Save(char* filename)
{
    FILE* saveFile = fopen( filename, "w" );
    if(!saveFile)
        return -1;  // open failed; the original fclose(NULL) here would crash

    int* ind = m_body->GetPointsIds();

    for( size_t cam_index = 0; cam_index < m_cameras.size(); cam_index++ )
    {
        CvPoint2D64f* image_points = m_image_points[cam_index];

        // dump only the points flagged visible for this camera
        // (the old pre-counting pass was redundant: this loop already
        // writes nothing when no point is visible)
        for( int i = 0; i < m_body->NumPoints(); i++ )
        {
            if( m_point_visibility[cam_index][i] == PT_VISIBLE )
            {
                //point is visible
                fprintf(saveFile, "%d %d %d %.12f %.12f\n", 0 /*snapshot_id*/, (int)cam_index, ind[i], image_points[i].x, image_points[i].y );
            }
        }
    }

    //close file
    fclose(saveFile);
    return 0;
}
Example #2
0
int TestLevmar2()
{
    //load data from file

    // 1. load ground truth 3D points
    RigidBody body;

    char fname[2048];
    sprintf(fname, "%sPoints0.3d", g_filepath );
    body.Load(fname);
    int num_points = body.NumPoints();

    // 2. load cameras
    int num_cams = 0;
    //int num_cam_param = 10;
    std::vector<Camera*> cams;

    sprintf(fname, "%scameras.txt", g_filepath );
    FILE* fp = fopen(fname, "r" );
    while( !feof(fp) )
    {
        char fname[MAX_PATH];
        fscanf(fp, "%s\n", fname );
        Camera* newcam = new Camera();
        FILE* fp2 = fopen( fname, "r" );
        newcam->readCamParams(fp2);
        cams.push_back(newcam);
        fclose(fp2);
        num_cams++;
    }
    fclose(fp);

    // 2. Load projection data
    CvMat* m = cvCreateMat( body.NumPoints(), num_cams, CV_64FC2 );
    //all invisible point will have (-1,-1) projection
    cvSet( m, cvScalar(-1,-1) );

    sprintf(fname, "%salldata.txt", g_filepath );
    fp = fopen( fname, "r");

    int counter = 0;
    while( !feof(fp) )
    {
        //read line
        int snapid, cameraid, pointid;
        double x,y;
        fscanf(fp, "%d %d %d %lf %lf\n", &snapid, &cameraid, &pointid, &x, &y);
        cvSet2D(m, pointid, cameraid, cvScalar(x,y) );
        counter++;
    }

    vector<vector<int> > visibility;

    //transform this matrix to measurement vector
    vector<vector<Point2d> > imagePoints;

    for(int j = 0; j < num_cams; j++ )
    {
        vector<Point2d> vec;
        vector<int> visvec;
        for(int i = 0; i < num_points; i++ )
        {
            CvScalar s = cvGet2D( m, i, j );
            if( s.val[0] != -1 ) //point is visible
            {
                vec.push_back(Point2d(s.val[0],s.val[1]));
                visvec.push_back(1);
            }
            else
            {
                vec.push_back(Point2d(DBL_MAX,DBL_MAX));
                visvec.push_back(0);
            }
        }
        imagePoints.push_back(vec);
        visibility.push_back(visvec);
    }

    //form initial params
    vector<Mat> cameraMatrix; //intrinsic matrices of all cameras (input and output)
    vector<Mat> R; //rotation matrices of all cameras (input and output)
    vector<Mat> T; //translation vector of all cameras (input and output)
    vector<Mat> distCoeffs; //distortion coefficients of all cameras (input and output)

    //store original camera positions
    vector<Mat> T_backup;
    //store original camera rotations
    vector<Mat> R_backup;
    //store original intrinsic matrix
    vector<Mat> cameraMatrix_backup;
    //store original distortion
    vector<Mat> distCoeffs_backup;

    for( int i = 0; i < (int)cams.size(); i++ )
    {
        //fill params
        Camera* c = cams[i];
        //rotation
        double* rotmat = c->GetRotation();
        Mat rm(3,3,CV_64F,rotmat);
        R_backup.push_back(rm);
        //generate small random rotation
        Mat rodr;
        Rodrigues(rm, rodr);
        double n = norm(rodr);
        //add small angle
        //add about 5 degrees to rotation
        rodr *= ((n+0.1)/n);
        Rodrigues(rodr, rm);
        R.push_back(rm);

        //translation
        double* tr = c->GetTranslation();
        Mat tv(Size(1,3), CV_64F, tr);
        T_backup.push_back(tv);
        //add random translation within 1 cm
        Mat t_(3,1,CV_64F);
        randu(t_, -0.01, 0.01);
        tv+=t_;

        T.push_back(tv);
        //intrinsic matrix
        double* intr = c->GetIntrinsics();
        cameraMatrix_backup.push_back(Mat(Size(3,3), CV_64F, intr));
        cameraMatrix.push_back(Mat(Size(3,3), CV_64F, intr));

        //distortion
        Mat d(4, 1, CV_64F, c->GetDistortion() );
        distCoeffs_backup.push_back(d);

        //variate distortion by 5%
        d*=1.05;
        distCoeffs.push_back(d);

    }

    //form input points
    const Point3d* ptptr = (const Point3d*)body.GetPoints3D(true);
    vector<Point3d> points(ptptr, ptptr + num_points);
    //store points
    vector<Point3d> points_backup(num_points);
    std::copy(points.begin(), points.end(), points_backup.begin());

    //variate initial points
    CvRNG rng;
    CvMat* values = cvCreateMat( num_points*3, 1, CV_64F );
    cvRandArr( &rng, values, CV_RAND_NORMAL,
               cvScalar(0.0), // mean
               cvScalar(0.02) // deviation (in meters)
             );
    CvMat tmp = cvMat(values->rows, values->cols, values->type, &points[0] );
    cvAdd( &tmp, values, &tmp );
    cvReleaseMat( &values );

    LevMarqSparse::bundleAdjust( points, //positions of points in global coordinate system (input and output)
                                 imagePoints, //projections of 3d points for every camera
                                 visibility, //visibility of 3d points for every camera
                                 cameraMatrix, //intrinsic matrices of all cameras (input and output)
                                 R, //rotation matrices of all cameras (input and output)
                                 T, //translation vector of all cameras (input and output)
                                 distCoeffs, //distortion coefficients of all cameras (input and output)
                                 TermCriteria(TermCriteria::COUNT|TermCriteria::EPS, 3000, DBL_EPSILON ));
    //,enum{MOTION_AND_STRUCTURE,MOTION,STRUCTURE})

    //compare input points and found points
    double maxdist = 0;
    for( size_t i = 0; i < points.size(); i++ )
    {
        Point3d in = points_backup[i];
        Point3d out = points[i];
        double dist = sqrt(in.dot(out));
        if(dist > maxdist)
            maxdist = dist;
    }
    printf("Maximal distance between points: %.12lf\n", maxdist );

    //compare camera positions
    maxdist = 0;
    for( size_t i = 0; i < T.size(); i++ )
    {
        double dist = norm(T[i], T_backup[i]);

        if(dist > maxdist)
            maxdist = dist;
    }
    printf("Maximal distance between cameras centers: %.12lf\n", maxdist );

    //compare rotation matrices
    maxdist = 0;
    for( size_t i = 0; i < R.size(); i++ )
    {
        double dist = norm(R[i], R_backup[i], NORM_INF);

        if(dist > maxdist)
            maxdist = dist;
    }
    printf("Maximal difference in rotation matrices elements: %.12lf\n", maxdist );

    //compare intrinsic matrices
    maxdist = 0;
    double total_diff = 0;
    for( size_t i = 0; i < cameraMatrix.size(); i++ )
    {
        double fx_ratio = cameraMatrix[i].at<double>(0,0)/
                          cameraMatrix_backup[i].at<double>(0,0);
        double fy_ratio = cameraMatrix[i].at<double>(1,1)/
                          cameraMatrix_backup[i].at<double>(1,1);
        double cx_diff = cameraMatrix[i].at<double>(0,2) -
                         cameraMatrix_backup[i].at<double>(0,2);
        double cy_diff = cameraMatrix[i].at<double>(1,2) -
                         cameraMatrix_backup[i].at<double>(1,2);
        total_diff += fabs(fx_ratio - 1) + fabs(fy_ratio - 1) + fabs(cx_diff) + fabs(cy_diff);
    }
    //ts->printf(CvTS::LOG, "total diff = %g\n", total_diff);

    return 1;
}
Example #3
0
// Projects the rigid body's 3D points into every camera, optionally adds
// clipped gaussian pixel noise, and assigns each (camera, point) pair a
// visibility flag (PT_VISIBLE / PT_INVISIBLE / PT_OUTBORDER). Finally
// guarantees every point is visible in at least two cameras, which bundle
// adjustment requires. Returns 0.
int Environment::Capture()
{
    //free results of a previous capture; the arrays were allocated with
    //new[], so scalar delete (as in the original) was undefined behavior —
    //delete[] is required (and is safe on NULL, so no guard is needed)
    for( size_t i = 0; i < m_image_points.size(); i++ )
    {
        delete [] m_image_points[i];
    }
    m_image_points.clear();

    for( size_t i = 0; i < m_point_visibility.size(); i++ )
    {
        delete [] m_point_visibility[i];
    }
    m_point_visibility.clear();

    CvPoint3D64f* board_pts = m_body->GetPoints3D(true);
    int num_points = m_body->NumPoints();

    //seed the generator once so every camera draws an independent noise /
    //visibility sequence (the original re-declared an UNINITIALIZED CvRNG
    //inside the loop: indeterminate seed, identical state per camera)
    CvRNG rng = cvRNG(-1);

    //loop over cameras
    for( size_t i = 0; i < m_cameras.size(); i++ )
    {
        //camera parameters used for the projection
        double* rot = m_cameras[i]->GetRotation();
        double* trans = m_cameras[i]->GetTranslation();
        double* intr = m_cameras[i]->GetIntrinsics();
        double* dist = m_cameras[i]->GetDistortion();

        CvPoint2D64f* image_points = new CvPoint2D64f[num_points];
        m_image_points.push_back(image_points);

        int* points_visibility = new int[num_points];
        m_point_visibility.push_back(points_visibility);

        cvProjectPointsSimple(num_points, board_pts, rot, trans, intr, dist, image_points);

        if( noise > 0)
        {
            //add gaussian noise (clipped to +-stdev) to every projection
            CvMat* values = cvCreateMat( num_points, 1, CV_32FC2 );

            float stdev = noise;

            cvRandArr( &rng, values, CV_RAND_NORMAL,
                       cvScalar(0.0, 0.0), // mean
                       cvScalar(stdev, stdev) // deviation
                     );

            for( int j = 0; j < num_points; j++ )
            {
                CvPoint2D32f pt = *(CvPoint2D32f*)cvPtr1D( values, j, 0 );

                //clip so no point is displaced by more than one stdev
                pt.x = min( pt.x, stdev);
                pt.x = max( pt.x, -stdev);

                pt.y = min( pt.y, stdev);
                pt.y = max( pt.y, -stdev);

                image_points[j].x += pt.x;
                image_points[j].y += pt.y;
            }
            cvReleaseMat( &values );
        }

        //assign visibility: points outside the image (1-pixel border counts
        //as outside) are PT_OUTBORDER; the rest are randomly visible (50%)
        for( int j = 0; j < num_points; j++ )
        {
            int visible = cvRandInt(&rng) % 2; //visibility 50%

            if( image_points[j].x > 0 && image_points[j].x < m_cameras[i]->GetResolution().width-1
                    && image_points[j].y > 0 && image_points[j].y < m_cameras[i]->GetResolution().height-1 )
            {
                points_visibility[j] = visible ? PT_VISIBLE : PT_INVISIBLE;
            }
            else
                points_visibility[j] = PT_OUTBORDER;
        }
    }

    //some points may be visible in fewer than 2 cameras; promote randomly
    //hidden (but in-FOV) points until every point has at least 2 views
    for( int i = 0 ; i < num_points; i++ )
    {
        int numvis = 0;
        for( size_t j = 0; j < m_cameras.size(); j++ )
        {
            if( m_point_visibility[j][i] == PT_VISIBLE )
                numvis++;
        }

        if(numvis < 2)
        {
            for( size_t j = 0; j < m_cameras.size(); j++ )
            {
                if( m_point_visibility[j][i] == PT_INVISIBLE )
                {
                    m_point_visibility[j][i] = PT_VISIBLE;
                    numvis++;
                }
                if(numvis > 1)
                    break;
            }
            //still possible to fail if the point is out of border for
            //nearly all cameras — the assert documents that assumption
            assert(numvis > 1 );
        }
    }

    return 0;
}