Code Example #1
void CTomGineThread::AddSifts(const std::vector<blortRecognizer::Siftex>& sl)
{
	if(sl.empty())
		return;	// nothing to add; also avoids division by zero when averaging below

	m_mutex.Lock();
		m_lastsiftexlist = sl;
		for(unsigned i=0; i<sl.size(); i++)
		  m_siftexlist.push_back(sl[i]);
	m_mutex.Unlock();
	
	double pos[3] = {0,0,0};
	double normal[3] = {0,0,0};
	double view[3] = {0,0,0};
	unsigned sc = sl.size();
	
	for(unsigned i=0; i<sc; i++){
	  pos[0] += sl[i].pos.x;
	  pos[1] += sl[i].pos.y;
	  pos[2] += sl[i].pos.z;
	  normal[0] += sl[i].normal.x;
	  normal[1] += sl[i].normal.y;
	  normal[2] += sl[i].normal.z;
	  view[0] += sl[i].viewray.x;
	  view[1] += sl[i].viewray.y;
	  view[2] += sl[i].viewray.z;
	}
	
	vec3 vPos = vec3(float(pos[0]/sc), float(pos[1]/sc), float(pos[2]/sc)); 
	//vec3 vNormal = vec3(float(normal[0]/sc), float(normal[1]/sc), float(normal[2]/sc)); 
	vec3 vView = vec3(float(view[0]/sc), float(view[1]/sc), float(view[2]/sc));
	
	vec3 p = vPos - vView;
    //vec3 p = sl[0].pos - sl[0].viewray;
	
	vec3 f = vView;	f.normalize();
	vec3 u = vec3(0,0,1);
	vec3 s;
	s.cross(u,f); s.normalize();
	u.cross(f,s); u.normalize();

	float rad = 0.0f;
	for(unsigned i=0; i<sc; i++){
		vec3 d = vPos - sl[i].pos;
		float l = d.length();
		if(rad < l)
		  rad=l;
	}
	m_viewscale.push_back(vec3(rad, rad, vView.length()));

	float fR[9] = { s.x, s.y, s.z,
					u.x, u.y, u.z,
					f.x, f.y, f.z};
	mat3 R = mat3(fR);
	TomGine::tgPose pose;
	pose.SetPose(R, p);
	m_viewlist.push_back(pose);
}
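The middle of AddSifts builds a look-at style orthonormal basis from the mean view ray. Pulled out on its own, the construction would look roughly like the sketch below; it reuses exactly the same vec3/mat3 calls as above, so any layout assumptions stay those of AddSifts.

// Build a rotation from a forward direction, using +Z as the up seed,
// mirroring the basis construction in AddSifts.
mat3 lookAtRotation(vec3 forward)
{
	vec3 f = forward;	f.normalize();	// forward axis
	vec3 u = vec3(0,0,1);				// up seed
	vec3 s;
	s.cross(u, f);	s.normalize();		// side vector from up and forward
	u.cross(f, s);	u.normalize();		// re-orthogonalized up
	float fR[9] = { s.x, s.y, s.z,
					u.x, u.y, u.z,
					f.x, f.y, f.z };
	return mat3(fR);
}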
Code Example #2
File: tgErrorMetric.cpp  Project: HomeBaseDev/sandbox
vec3 tgErrorMetric::GetMean(const TomGine::tgPose &p1, const TomGine::tgPose &p2)
{
	if(pointlist.empty())
		return vec3(0.0f, 0.0f, 0.0f);	// avoid division by zero

	double err_x = 0.0;
	double err_y = 0.0;
	double err_z = 0.0;
	double dN = 1.0 / pointlist.size();

	// The two poses are constant over the loop, so query them only once.
	mat3 R1, R2;
	vec3 t1, t2;
	p1.GetPose(R1,t1);
	p2.GetPose(R2,t2);

	for(unsigned i=0; i<pointlist.size(); i++){
		vec3 verr = (R1 * pointlist[i] + t1) - (R2 * pointlist[i] + t2);
		
		err_x += double(fabs(verr.x)) * dN;
		err_y += double(fabs(verr.y)) * dN;
		err_z += double(fabs(verr.z)) * dN;
	}
	return vec3(float(err_x), float(err_y), float(err_z));
}
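For context, a hypothetical call site for GetMean; the construction of tgErrorMetric and the filling of its pointlist are not shown above, so the metric object and both poses are assumed here.

// 'metric' is an already-constructed tgErrorMetric whose pointlist holds
// model-space sample points; the two poses are the ones to compare.
TomGine::tgPose ground_truth, estimate;
// ... obtain / estimate the two poses ...
vec3 err = metric.GetMean(ground_truth, estimate);
// err.x, err.y, err.z are the per-axis mean absolute errors of the
// transformed sample points, in the model's units.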
Code Example #3
void Recognizer3D::Convert(P::PoseCv& p1, TomGine::tgPose& p2){

    mat3 cR;
    vec3 ct;

    cR[0] = cvmGet(p1.R,0,0); cR[3] = cvmGet(p1.R,0,1); cR[6] = cvmGet(p1.R,0,2);
    cR[1] = cvmGet(p1.R,1,0); cR[4] = cvmGet(p1.R,1,1); cR[7] = cvmGet(p1.R,1,2);
    cR[2] = cvmGet(p1.R,2,0); cR[5] = cvmGet(p1.R,2,1); cR[8] = cvmGet(p1.R,2,2);

    ct.x = cvmGet(p1.t,0,0);
    ct.y = cvmGet(p1.t,1,0);
    ct.z = cvmGet(p1.t,2,0);

    p2.SetPose(cR,ct);
}
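A minimal sketch of the opposite conversion (tgPose back into P::PoseCv), assuming p1.R and p1.t are pre-allocated 3x3 and 3x1 CvMat buffers and that mat3 is addressed column-major (index = row + 3*col), which is the layout the example above writes into; the function name is illustrative.

void ConvertBack(const TomGine::tgPose& p2, P::PoseCv& p1){

    mat3 cR;
    vec3 ct;
    p2.GetPose(cR, ct);

    // Write the rotation back element by element, mirroring the indexing above.
    for(int row = 0; row < 3; row++)
        for(int col = 0; col < 3; col++)
            cvmSet(p1.R, row, col, cR[row + 3*col]);

    cvmSet(p1.t, 0, 0, ct.x);
    cvmSet(p1.t, 1, 0, ct.y);
    cvmSet(p1.t, 2, 0, ct.z);
}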
Code Example #4
void getCamPose(const char* pose_cal_file, TomGine::tgPose& camPose){
    CDataFile cdfParams;
    if(!cdfParams.Load(pose_cal_file)){
        char errmsg[256];
        snprintf(errmsg, sizeof(errmsg), "[utilities::getCamPose] Cannot open pose_cal file '%s'", pose_cal_file);
        throw std::runtime_error(errmsg);
    }

    vec3 t, r;
    mat3 R;

    std::string pose = cdfParams.GetString("pose");
    if(sscanf( pose.c_str(), "[%f %f %f] [%f %f %f]",
            &(t.x), &(t.y), &(t.z), &(r.x), &(r.y), &(r.z) ) != 6)
        throw std::runtime_error("[utilities::getCamPose] Invalid 'pose' entry in pose_cal file");

    R.fromRotVector(r);

    camPose.SetPose(R,t);
}
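Judging from the sscanf format string, the 'pose' entry in the pose_cal file is expected to look roughly like this (the values are placeholders and the exact key syntax depends on CDataFile):

pose = [0.10 -0.25 0.80] [0.00 0.00 1.57]

The first bracket is the camera translation, the second a rotation vector that fromRotVector converts into a rotation matrix.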
Code Example #5
// Converts an OpenCV pose (rotation vector R, translation T, given in camera
// coordinates) into a TomGine pose in world coordinates, using the known camera pose.
void ConvertCv2tgPoseWorld(	const cv::Mat_<double> &R, const cv::Mat_<double> &T, 
                                const TomGine::tgPose &camPose, TomGine::tgPose &pose)
{
    mat3 r;
    vec3 t, rvec;
    TomGine::tgPose pose_cam_coord, camPoseT;
    camPoseT = camPose.Transpose();

    t.x = T(0,0);
    t.y = T(1,0);
    t.z = T(2,0);
    rvec.x = R(0,0);
    rvec.y = R(1,0);
    rvec.z = R(2,0);
    r.fromRotVector(rvec);
    r = r.transpose();

    pose_cam_coord.SetPose(r,t);

    pose = camPoseT * pose_cam_coord;
}
Code Example #6
// Inverts the checkerboard extrinsics (rotation vector R, translation T) to obtain
// the camera pose in checkerboard/world coordinates: R_cam = R^T, t_cam = -R^T * t.
void getCamPoseCvCheckerboard(const cv::Mat_<double> &R, const cv::Mat_<double> &T, TomGine::tgPose &camPose)
{
    vec3 r, t;
    mat3 Rm;
    cv::Mat_<double> cv_Rm = cv::Mat_<double> ( 3,3 );
    cv::Mat_<double> cv_rvec = cv::Mat_<double> ( 3,1 );
    cv::Mat_<double> cv_tvec = cv::Mat_<double> ( 3,1 );
    cv::Rodrigues(R, cv_Rm);
    cv::transpose(cv_Rm, cv_Rm);
    cv_tvec = -cv_Rm * T;				// t_cam = -R^T * t (cv_Rm was transposed above)
    cv::Rodrigues(cv_Rm, cv_rvec);

    r.x = cv_rvec(0,0);
    r.y = cv_rvec(1,0);
    r.z = cv_rvec(2,0);
    t.x = cv_tvec(0,0);
    t.y = cv_tvec(1,0);
    t.z = cv_tvec(2,0);
    Rm.fromRotVector(r);

    camPose.SetPose(Rm,t);
}
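A hypothetical call site, assuming the rotation vector and translation come from cv::solvePnP on detected checkerboard corners; boardPoints, imagePoints, cameraMatrix and distCoeffs are placeholders.

// rvec/tvec map checkerboard (world) points into the camera frame;
// getCamPoseCvCheckerboard inverts that to get the camera pose in world coordinates.
cv::Mat_<double> rvec(3,1), tvec(3,1);
cv::solvePnP(boardPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

TomGine::tgPose camPose;
getCamPoseCvCheckerboard(rvec, tvec, camPose);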
Code Example #7
bool Recognizer3D::learnSifts(IplImage* tFrame, const TomGine::tgModel& m1, const TomGine::tgPose& pose)
{	
    P::Array<P::KeypointDescriptor*> m_tmp_keys;

    mat3 R, Rt;
    vec3 t;
    TomGine::tgModel m2 = m1;
    m_lastsiftexlist.clear();

    // Transform model vertices to camera coordinates
    pose.GetPose(R,t);
    Rt = R.transpose();
    for(unsigned int i=0; i<m2.m_vertices.size(); i++)
    {
        m2.m_vertices[i].pos = (R * m2.m_vertices[i].pos) + t;
        m2.m_vertices[i].normal = R * m2.m_vertices[i].normal;
    }

    IplImage* tImg = 0;
    IplImage* grey = 0;
    float dcpfx = 1/m_cp.fx;
    float dcpfy = 1/m_cp.fy;

    tImg = cvCreateImage ( cvGetSize ( tFrame ), 8, 3 );
    grey = cvCreateImage ( cvGetSize ( tFrame ), 8, 1 );

    // undistort
    cvRemap(tFrame, tImg, pMapX, pMapY );

    // convert to gray scale image
    cvConvertImage(tImg, grey);

    //EXPERIMENTAL:
    //    cv::Mat descriptors;
    //    std::vector<cv::KeyPoint> keypoints;
    //    cv_detect->extract(cv::Mat(grey, true), descriptors, keypoints);
    //
    //    pal_blort::CvDetect3D::cvKeypoints2BlortKeyPoints(
    //            descriptors, keypoints, m_image_keys);
    // END OF EXPERIMENTAL

    sift.Operate(grey, m_image_keys);

    for(unsigned int i=0; i<m_image_keys.Size(); i++)
    {
        vec3 point, normal;
        std::vector<vec3> pl;		// point list
        std::vector<vec3> nl;		// normal list
        std::vector<double> zl; // z-value list (depth)
        float u,v,x,y;

        u = (float)m_image_keys[i]->p.x;
        v = (float)m_image_keys[i]->p.y;
        x = (u-m_cp.cx) * dcpfx;
        y = (v-m_cp.cy) * dcpfy;

        // Create ray from pixel
        TomGine::tgRay ray;
        ray.start = vec3(0.0f,0.0f,0.0f);
        ray.dir =  vec3(x,y,1.0f);
        ray.dir.normalize();

        if(	TomGine::tgCollission::IntersectRayModel(pl, nl, zl, ray, m2)
            && !pl.empty()
            && !zl.empty())
        {
            // determine nearest intersection point
            unsigned int idx_min = 0;
            float z_min = zl[0];
            for(unsigned int idx=0; idx<zl.size(); idx++){
                if(zl[idx] < z_min){
                    idx_min = idx;
                    z_min = zl[idx];
                }
            }

            if(z_min > 0.0f){
                // Transform to object space
                point = (Rt * (pl[idx_min] - t));
                normal = (Rt * nl[idx_min]);
                vec3 r = (Rt * ray.dir) * zl[idx_min];

                Siftex s;
                s.pos = point;
                s.normal = normal;
                s.viewray = r;

                normal.normalize();
                r.normalize();

                // keep only keypoints on surfaces facing the camera
                // (view ray and surface normal roughly opposite)
                if( (r * normal) < -0.1f)
                {
                    // Save sift
                    m_siftexlist.push_back(s);
                    m_lastsiftexlist.push_back(s);
                    m_image_keys[i]->SetPos(point.x, point.y, point.z);
                    m_tmp_keys.PushBack(m_image_keys[i]);
                }
            }
        }

    }

    if(m_tmp_keys.Size() > 0)
    {
        m_sift_model_learner.AddToModel(m_tmp_keys, *m_sift_models[0]);
        ROS_INFO("[Recognizer3D::learnSifts] added %d SIFT features to the model", m_tmp_keys.Size());
        m_model_loaded = true;
    }

    if(m_display){
        display_image = cv::Mat(tImg, true);
    }

    cvReleaseImage(&tImg);
    cvReleaseImage(&grey);

    return true;
}
Code Example #8
// Converts pose from Camera coordinates to world space
void ConvertCam2World(const TomGine::tgPose& pC, const TomGine::tgPose& camPose, TomGine::tgPose& pW){

    pW = camPose.Transpose() * pC;
}
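Assuming tgPose::Transpose() acts as the pose inverse (as it is used throughout the examples above), the opposite direction simply drops the Transpose(); a hypothetical counterpart:

// Converts pose from world space back to camera coordinates (inverse of ConvertCam2World)
void ConvertWorld2Cam(const TomGine::tgPose& pW, const TomGine::tgPose& camPose, TomGine::tgPose& pC){

    pC = camPose * pW;
}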
Code Example #9
int main(int argc, char *argv[] )
{
    if(argc < 2)
    {
        printf("Usage: %s <config_root>\n", argv[0]);
        return -1;
    }
    std::string config_root = argv[1];

    //this line should force opengl to run software rendering == no GPU
    //putenv("LIBGL_ALWAYS_INDIRECT=1");

    ros::init(argc, argv, "blort_learnsifts");
    ros::NodeHandle nh("blort_learnsifts");
    image_transport::ImageTransport it(nh);
    image_transport::Subscriber image_sub = it.subscribe("/blort_image", 1, &imageCb);
    cam_info_sub = nh.subscribe("/blort_camera", 1, &cam_info_callback);
    ros::Publisher confidences_pub = nh.advertise<blort_ros::TrackerConfidences>("confidences", 100);

    printf("\n Learnsifts \n\n");

    printf(" Tracker control\n");
    printf(" -------------------------------------------\n");
    printf(" [q,Esc] Quit program\n");
    printf(" [Return] Save SIFT and ply models.\n");
    printf(" [Space] Save texture face and extract SIFTS.\n");
    printf(" -------------------------------------------\n");
    printf(" [4] Texture edge tracking\n");
    printf(" [5] Texture color tracking\n");
    printf(" [a] High accuracy tracking (less robust)\n");
    printf(" [e] Show edges of image\n");
    printf(" [i] Print tracking information to console\n");
    printf(" [l] Lock/Unlock tracking\n");
    printf(" [m] Switch display mode of model\n");
    printf(" [p] Show particles\n");
    printf(" [r] Reset tracker to initial pose\n");
    printf(" [s] Save model to file (including textures)\n");
    printf(" [t] Capture model texture from image\n");
    printf(" [u] Untexture/remove texture from model\n");
    printf(" \n\n ");

    blortGLWindow::Event event;
    TomGine::tgTimer timer;
    std::vector<vec3> view_list;

    // File names
    //FIXME: make these ROS parameters or eliminate them and use the content as parameters
    std::string pose_cal = pal_blort::addRoot("bin/pose.cal", config_root);
    std::string tracking_ini(pal_blort::addRoot("bin/tracking.ini", config_root));
    std::vector<std::string> ply_models, sift_files, model_names;
    std::string ply_model, sift_file, model_name;

    GetPlySiftFilenames(tracking_ini.c_str(), ply_models, sift_files, model_names);
    ply_model = ply_models[0]; sift_file = sift_files[0]; model_name = model_names[0];
    printf("Object name: %s\n", model_name.c_str());
    sift_file = pal_blort::addRoot(sift_file, config_root);

    // Get Parameter from file
    TomGine::tgPose camPose, camPoseT;
    Tracking::Tracker::Parameter trackParams;
    GetTrackingParameter(trackParams, tracking_ini.c_str(), config_root);
    getCamPose(pose_cal.c_str(), camPose);
    camPoseT = camPose.Transpose();

    printf("=> Getting camera intrinsics ... ");
    // wait for the first camera_info message
    need_cam_init = true;
    while(need_cam_init)
    {
        ros::spinOnce();
    }
    setCameraPose(tgCamParams, pose_cal.c_str());
    trackParams.camPar = tgCamParams;

    printf("OK\n");

    // Initialise image
    IplImage *image = cvCreateImage( cvSize(camera_info.width, camera_info.height), 8, 3 ); //FIXME dirty

    printf("=> Initialising tracker ... ");

    // Create OpenGL Window and Tracker
    CRecognizerThread* pThreadRec =
            new CRecognizerThread(blortRecognizer::CameraParameter(camera_info));
    blortGLWindow::GLWindow window(trackParams.camPar.width, trackParams.camPar.height, "Training");
    Tracking::TextureTracker tracker;
    tracker.init(trackParams);
    float rec_time = 0.0f;
    printf("OK\n");

    // Model for Tracker
    TomGine::tgPose trPose;
    trPose.t = vec3(0.0, 0.1, 0.0);
    trPose.Rotate(0.0f, 0.0f, 0.5f);    
    std::string modelFullPath = pal_blort::addRoot(ply_model, config_root);
    printf("=> Trying to get the object model from file: %s\n", modelFullPath.c_str());
    int modelID = tracker.addModelFromFile(modelFullPath.c_str(), trPose, model_name.c_str(), true);
    printf(" OK\n");
    tracker.setLockFlag(true);



    // Model for Recognizer / TomGine (ray-model intersection)
    TomGine::tgModel model;
    TomGine::tgPose tg_pose;
    TomGine::tgModelLoader modelloader;
    modelloader.LoadPly(model, pal_blort::addRoot(ply_model, config_root).c_str());

    Tracking::movement_state movement = Tracking::ST_FAST;
    Tracking::quality_state quality = Tracking::ST_OK;
    Tracking::confidence_state confidence = Tracking::ST_BAD;

    need_new_image = true;
    while(need_new_image)
    {
        ros::spinOnce();
    }
    *image = lastImage;

    pThreadRec->Event();

    bool quit = false;
    while(!quit)
    {
        tracker.getModelMovementState(modelID, movement);
        tracker.getModelQualityState(modelID, quality);
        tracker.getModelConfidenceState(modelID, confidence);

        if(confidence == Tracking::ST_GOOD && movement == Tracking::ST_STILL && quality != Tracking::ST_LOCKED)
        {
            ROS_WARN("Sorry, no learning with this node.");
            ROS_INFO("New initial pose fixed: \nt: [%f, %f, %f] \nq:[%f, %f, %f, %f]",
                     trPose.t.x,
                     trPose.t.y,
                     trPose.t.z,
                     trPose.q.x,
                     trPose.q.y,
                     trPose.q.z,
                     trPose.q.w);
        }

        //recovery, if object lost
        if(quality == Tracking::ST_LOST && rec_time > 0.2f)
        {
//            TomGine::tgPose recPose;
//            float conf;
//
//            pThreadRec->Recognize(image, recPose, conf);
//            if(conf > recovery_conf_threshold)
//            {
//                ConvertCam2World(recPose, camPose, trPose);
//                tracker.setModelInitialPose(modelID, trPose);
//                tracker.resetUnlockLock();
//            }
//            //tracker.reset();
//
//            ROS_INFO("orig conf: %f", conf);
//
//            // if the recovery's confidence is high enough then
//            // kick the tracker out of this state and let it try
//
//
//            rec_time = 0.0f;
//            ROS_WARN("Tried to recover for the %d. time.", ++rec3dcounter);
        }else{
            rec_time += timer.Update();
        }

        if(!need_new_image)
        {
            // Get image
            timer.Update();
            *image = lastImage;

            // Track object
            tracker.image_processing((unsigned char*)image->imageData);
            tracker.track();

            tracker.drawImage(0);
            tracker.drawCoordinates();
            tracker.getModelPose(modelID, trPose);
            tracker.drawResult(2.0f);
            tg_pose = trPose;
            need_new_image = true;
        }
        // Keyboard inputs:
        while(window.GetEvent(event)){
            quit = !InputControl(&tracker, event, config_root);
            if(event.type == blortGLWindow::TMGL_Press)
            {
                if(event.input == blortGLWindow::TMGL_Space)
                {
                    // When hitting space bar -> Learn new sift features
                    TomGine::tgPose pT1;
                    TomGine::tgPose pT2, pT3, pT4;
                    tracker.getModelPoseCamCoords(modelID, pT1);

                    mat3 R; vec3 a;
                    pT1.GetPose(R, a);
                    a = R.transpose() * pT1.t;
                    a.normalize();
                    view_list.push_back(a);

                    pT2 = pT1;
                    pThreadRec->LearnSifts(image, model, pT2);

                    std::vector<blortRecognizer::Siftex> sl;
                    pThreadRec->GetLastSifts(sl);
                    tracker.textureFromImage(true);
                }
                else if(event.input == blortGLWindow::TMGL_Return)
                {
                    // When hitting Return -> Save sift model and recognize
                    //TomGine::tgPose recPose;
                    //float conf;
                    pThreadRec->SaveSiftModel(sift_file.c_str());
                    //pThreadRec->Recognize(image, recPose, conf);
                    //ConvertCam2World(recPose, camPose, trPose);
                    //tracker.setModelInitialPose(modelID, trPose);
                    //tracker.reset(modelID);
                    tracker.saveModels(pal_blort::addRoot("Resources/ply/", config_root).c_str());
                }
            }
            event.type = blortGLWindow::TMGL_None;
        }

        window.Update();

        //publish tracker inner confidences just for fun
        Tracking::ModelEntry* myModelEntry = tracker.getModelEntry(modelID);
        blort_ros::TrackerConfidences tr_conf;
        tr_conf.edgeConf = myModelEntry->c_edge;
        tr_conf.confThreshold = myModelEntry->c_th;
        tr_conf.lostConf = myModelEntry->c_lost;
        tr_conf.distance = myModelEntry->t;
        confidences_pub.publish(tr_conf);

        window.Activate();
        ros::spinOnce();
    }
    delete(pThreadRec);
    cvReleaseImage(&image);

    return 0;
}