Example #1
// Returns the mean absolute error between two poses, per axis, measured
// over the stored point list.
vec3 tgErrorMetric::GetMean(const TomGine::tgPose &p1, const TomGine::tgPose &p2)
{
	if(pointlist.empty())
		return vec3(0.0f, 0.0f, 0.0f);

	double err_x = 0.0;
	double err_y = 0.0;
	double err_z = 0.0;
	double dN = 1.0 / pointlist.size();

	// The poses do not change inside the loop; extract rotation and
	// translation once instead of once per point.
	mat3 R1, R2;
	vec3 t1, t2;
	p1.GetPose(R1, t1);
	p2.GetPose(R2, t2);

	for(unsigned i=0; i<pointlist.size(); i++){
		// Transform the same model point with both poses and accumulate
		// the absolute per-axis difference, pre-scaled by 1/N
		vec3 verr = (R1 * pointlist[i] + t1) - (R2 * pointlist[i] + t2);
		err_x += double(fabs(verr.x)) * dN;
		err_y += double(fabs(verr.y)) * dN;
		err_z += double(fabs(verr.z)) * dN;
	}
	return vec3(err_x, err_y, err_z);
}
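
A minimal usage sketch for GetMean follows; it is hedged, since the default construction of tgErrorMetric and how its point list gets filled are assumptions here, not verified TomGine API:

// Hypothetical caller: compare a reference pose against a tracked pose.
// Assumes the BLORT/TomGine headers are available.
TomGine::tgPose pose_ref;    // e.g. ground truth
TomGine::tgPose pose_est;    // e.g. current tracker estimate
tgErrorMetric metric;        // assumed to be set up with a model point list
vec3 e = metric.GetMean(pose_ref, pose_est);
printf("mean abs. error: x=%f y=%f z=%f\n", e.x, e.y, e.z);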
Example #2
bool Recognizer3D::learnSifts(IplImage* tFrame, const TomGine::tgModel& m1, const TomGine::tgPose& pose)
{
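    // Collects keypoints whose viewing ray actually hits the model surface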
    P::Array<P::KeypointDescriptor*> m_tmp_keys;

    mat3 R, Rt;
    vec3 t;
    TomGine::tgModel m2 = m1;
    m_lastsiftexlist.clear();

    // Transform model vertices and normals to camera coordinates
    pose.GetPose(R,t);
    Rt = R.transpose();	// inverse rotation, used later to map back to object space
    for(unsigned int i=0; i<m2.m_vertices.size(); i++)
    {
        m2.m_vertices[i].pos = (R * m2.m_vertices[i].pos) + t;
        m2.m_vertices[i].normal = R * m2.m_vertices[i].normal;
    }

    // Reciprocal focal lengths, used below to back-project pixels to rays
    float dcpfx = 1.0f / m_cp.fx;
    float dcpfy = 1.0f / m_cp.fy;

    IplImage* tImg = cvCreateImage(cvGetSize(tFrame), IPL_DEPTH_8U, 3);
    IplImage* grey = cvCreateImage(cvGetSize(tFrame), IPL_DEPTH_8U, 1);

    // Undistort the input frame using the precomputed remap tables pMapX/pMapY
    cvRemap(tFrame, tImg, pMapX, pMapY);

    // Convert to a grayscale image for SIFT extraction
    cvConvertImage(tImg, grey);

    //EXPERIMENTAL:
    //    cv::Mat descriptors;
    //    std::vector<cv::KeyPoint> keypoints;
    //    cv_detect->extract(cv::Mat(grey, true), descriptors, keypoints);
    //
    //    pal_blort::CvDetect3D::cvKeypoints2BlortKeyPoints(
    //            descriptors, keypoints, m_image_keys);
    // END OF EXPERIMENTAL

    // Detect SIFT keypoints and compute descriptors on the undistorted grey image
    sift.Operate(grey, m_image_keys);

    for(unsigned int i=0; i<m_image_keys.Size(); i++)
    {
        vec3 point, normal;
        std::vector<vec3> pl;		// point list
        std::vector<vec3> nl;		// normal list
        std::vector<double> zl; // z-value list (depth)
        float u,v,x,y;

        u = (float)m_image_keys[i]->p.x;
        v = (float)m_image_keys[i]->p.y;
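        // Pinhole back-projection: pixel (u,v) maps to the viewing direction
        // ((u-cx)/fx, (v-cy)/fy, 1) in camera coordinates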
        x = (u-m_cp.cx) * dcpfx;
        y = (v-m_cp.cy) * dcpfy;

        // Create ray from pixel
        TomGine::tgRay ray;
        ray.start = vec3(0.0f,0.0f,0.0f);
        ray.dir =  vec3(x,y,1.0f);
        ray.dir.normalize();

        // Intersect the viewing ray with the transformed model
        if( TomGine::tgCollission::IntersectRayModel(pl, nl, zl, ray, m2)
            && !pl.empty()
            && !zl.empty() )
        {
            // determine nearest intersection point
            unsigned int idx_min = 0;
            float z_min = zl[0];
            for(unsigned int idx=1; idx<zl.size(); idx++){
                if(zl[idx] < z_min){
                    idx_min = idx;
                    z_min = zl[idx];
                }
            }

            // Accept only intersections in front of the camera
            if(z_min > 0.0f){
                // Transform to object space
                point = (Rt * (pl[idx_min] - t));
                normal = (Rt * nl[idx_min]);
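                // Viewing ray to the hit point, rotated into object space and
                // scaled so its length equals the hit depth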
                vec3 r = (Rt * ray.dir) * zl[idx_min];

                Siftex s;
                s.pos = point;
                s.normal = normal;
                s.viewray = r;

                // Normalize only after storing: the Siftex keeps the raw
                // vectors, while the visibility test below needs unit vectors
                normal.normalize();
                r.normalize();

                // Accept only keypoints on surfaces facing the camera, i.e. the
                // dot product of unit viewing ray and unit normal is below -0.1
                if( (r * normal) < -0.1f)
                {
                    // Save sift
                    m_siftexlist.push_back(s);
                    m_lastsiftexlist.push_back(s);
                    m_image_keys[i]->SetPos(point.x, point.y, point.z);
                    m_tmp_keys.PushBack(m_image_keys[i]);
                }
            }
        }

    }

    if(m_tmp_keys.Size() > 0)
    {
        m_sift_model_learner.AddToModel(m_tmp_keys, *m_sift_models[0]);
        ROS_INFO("[Recognizer3D::learnSifts] added %u sifts to model", m_tmp_keys.Size());
        m_model_loaded = true;
    }

    if(m_display){
        // Deep copy (second argument true) so the pixels outlive cvReleaseImage below
        display_image = cv::Mat(tImg, true);
    }

    cvReleaseImage(&tImg);
    cvReleaseImage(&grey);

    return true;
}
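
The geometric core of learnSifts is pinhole back-projection of a keypoint pixel into a viewing ray, followed by picking the nearest positive-depth intersection with the model. A minimal, self-contained sketch of those two steps follows; the types and function names are illustrative stand-ins, not the TomGine/BLORT API:

#include <cmath>
#include <cstddef>
#include <vector>

struct Vec3f { float x, y, z; };

// Back-project pixel (u,v) to a unit viewing ray through the camera centre,
// using pinhole intrinsics fx, fy, cx, cy.
Vec3f PixelToRay(float u, float v, float fx, float fy, float cx, float cy)
{
    Vec3f d = { (u - cx) / fx, (v - cy) / fy, 1.0f };
    float n = std::sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
    d.x /= n; d.y /= n; d.z /= n;
    return d;
}

// From the depths of all ray/model intersections, pick the nearest one in
// front of the camera; returns z.size() if no intersection qualifies.
std::size_t NearestHit(const std::vector<float> &z)
{
    std::size_t best = z.size();
    for(std::size_t i = 0; i < z.size(); ++i)
        if(z[i] > 0.0f && (best == z.size() || z[i] < z[best]))
            best = i;
    return best;
}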