void testLambda(int dimAmb) {
    ifstream infile;
    infile.open("C:\\Users\\503\\Desktop\\te.txt", ios::in);
    if (!infile) {
        cerr << "open file err!" << endl;
        exit(1);
    }
    // read the (dimAmb+1) x dimAmb input matrix
    math::matrix<double> Ms(dimAmb + 1, dimAmb), xt, Q, F(dimAmb, 2);
    for (int m = 0; m < dimAmb + 1; m++) {
        for (int n = 0; n < dimAmb; n++) {
            infile >> Ms(m, n);
        }
    }
    // split the input into the float ambiguity vector xt and its covariance Q
    xt = ~GetBlockMat(Ms, 1, 1, 1, dimAmb, 2);
    Q = GetBlockMat(Ms, 2, dimAmb + 1, 1, dimAmb, 2);
    Ambiguity amb;
    double s[2];
    amb.Lambda(dimAmb, 2, xt, Q, F, s);
    cout << F;
    cout << s[1] / s[0] << endl; // ratio-test value
}
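A hedged aside on the last line: if s[0] and s[1] are the squared residuals of the best and second-best integer candidates returned by Lambda() (the usual LAMBDA convention), then s[1]/s[0] is the ratio-test statistic, and an acceptance check might look like the sketch below. The helper name and the 2.0 threshold are illustrative, not taken from the code above.

// Illustrative only: accept the fixed solution when the second-best candidate
// is sufficiently worse than the best one.
bool ratioTestPassed(const double s[2], double threshold = 2.0) {
    return s[1] / s[0] >= threshold;
}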
int SingleSLAM::poseUpdate3D2D() {
    propagateFeatureStates();

    //get the feature points corresponding to the map points
    std::vector<Track2DNode*> nodes;
    int num = getStaticMappedTrackNodes(nodes);
    if (num < 5) {
        repErr(
                "[camera id:%d]intra-camera pose update failed! less than five static map points (%d)",
                camId, num);
        return -1;
    }

    //get 2D-3D corresponding points
    Mat_d ms(num, 2), Ms(num, 3), repErrs(num, 1);
    for (int i = 0; i < num; i++) {
        ms.data[2 * i] = nodes[i]->pt->x;
        ms.data[2 * i + 1] = nodes[i]->pt->y;

        Ms.data[3 * i] = nodes[i]->pt->mpt->x;
        Ms.data[3 * i + 1] = nodes[i]->pt->mpt->y;
        Ms.data[3 * i + 2] = nodes[i]->pt->mpt->z;

        repErrs.data[i] = nodes[i]->pt->reprojErr;
    }

    //get 2D-2D corresponding points
    //TODO
    return 0;
}
int main() {
    scanf("%d", &N);
    for (int i = 1; i <= N; ++i) {
        scanf("%d", &Val(i));
        Sum(i) = Ms(i) = Lms(i) = Rms(i) = Val(i);
        Par(i) = i - 1, Rch(i - 1) = i, Size(i) = N - i + 1;
        splay(root, i);
    }
    // attach boundary sentinel nodes: N+1 in front of the sequence, N+2 behind it
    splay(root, 1), Par(N + 1) = 1, Lch(1) = N + 1, Size(N + 1) = 1, splay(root, N + 1);
    splay(root, N), Par(N + 2) = N, Rch(N) = N + 2, Size(N + 2) = 1, splay(root, N + 2);
    tot = N + 2;
    for (scanf("%d", &Que); Que--;) {
        scanf("%s%d", cmd, &x);
        if (cmd[0] == 'D')
            D(Findkth(root, x + 1));
        else {
            scanf("%d", &y);
            if (cmd[0] == 'Q')
                Q(Findkth(root, x), Findkth(root, y + 2));
            else if (cmd[0] == 'I')
                I(Findkth(root, x + 1), y);
            else
                R(Findkth(root, x + 1), y);
        }
    }
    print_r(root);
    printf("\n");
    print_l(root);
    printf("\n");
    return 0;
}
inline void Q(int x, int y) {
    // splay y to the root and x just below it, so the nodes strictly between
    // them form Rch(x); then report that subtree's maximum subsegment sum
    splay(root, y);
    Par(Lch(y)) = 0;
    splay(Lch(y), x);
    Par(x) = y;
    printf("%d\n", Ms(Rch(x)));
}
inline void I(int x, int y) {
    // splay x to the root, walk to the rightmost node of its left subtree,
    // and hang a new node holding value y there
    splay(root, x);
    int v = Lch(x);
    for (; Rch(v); v = Rch(v))
        ;
    Rch(v) = ++tot, Par(tot) = v, Size(tot) = 1;
    Sum(tot) = Ms(tot) = Lms(tot) = Rms(tot) = Val(tot) = y;
    splay(root, tot);
}
inline void Tupdate(int x) {
    // Lms/Rms/Ms: best prefix / suffix / subsegment sum of the subtree rooted at x
    Size(x) = Size(Lch(x)) + 1 + Size(Rch(x));
    Sum(x) = Sum(Lch(x)) + Val(x) + Sum(Rch(x));
    Lms(x) = max(Lms(Lch(x)), Sum(Lch(x)) + Val(x) + Lms(Rch(x)));
    Rms(x) = max(Rms(Rch(x)), Rms(Lch(x)) + Val(x) + Sum(Rch(x)));
    Ms(x) = Rms(Lch(x)) + Val(x) + Lms(Rch(x));
    if (Lch(x)) Ms(x) = max(Ms(x), Ms(Lch(x)));
    if (Rch(x)) Ms(x) = max(Ms(x), Ms(Rch(x)));
}
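For reference, the same bookkeeping can be phrased as an ordinary segment merge. The sketch below is illustrative and not part of the original splay tree: it shows how Sum, Lms, Rms, and Ms combine when two adjacent segments are concatenated, which is the rule Tupdate applies with the node's own value acting as a one-element middle segment.

#include <algorithm>

struct Seg {
    long long Sum; // total of the segment
    long long Lms; // best non-empty prefix sum
    long long Rms; // best non-empty suffix sum
    long long Ms;  // best non-empty subsegment sum
};

// combine two adjacent segments a (left) and b (right)
Seg combine(const Seg& a, const Seg& b) {
    Seg c;
    c.Sum = a.Sum + b.Sum;
    c.Lms = std::max(a.Lms, a.Sum + b.Lms);
    c.Rms = std::max(b.Rms, b.Sum + a.Rms);
    c.Ms  = std::max({a.Ms, b.Ms, a.Rms + b.Lms});
    return c;
}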
bool PoseUpdaterGNLS2::updatePose(std::vector<Keypoint>& keypoints, cv::Mat& pose) {
    count = static_cast<int>(keypoints.size());

    cv::Mat Ps(count * 3, 1, CV_64F);
    cv::Mat Ms(count * 3, 1, CV_64F);

    //cv::Mat K(3,3,CV_64F,projector.k);
    double *kinv = projector.kinv;
    double scale = projector.scale;

    double *M_ = (double *) Ms.data;
    double *P_ = (double *) Ps.data;
    for (int k = 0; k < count; ++k) {
        Keypoint& kp = keypoints[k];

        // map point offset from the map center -> unit direction vector
        double mx = kp.map_point.x - map_center.x;
        double my = kp.map_point.y - map_center.y;
        double r = sqrt(scale * scale + my * my);
        M_[0] = scale * sin(mx / scale) / r;
        M_[1] = my / r;
        M_[2] = scale * cos(mx / scale) / r;

        // image point -> unit-length ray (kinv * [px, py, 1])
        double px = kp.img_point.x;
        double py = kp.img_point.y;
        //double pz = 1.0;
        P_[0] = kinv[0] * px + kinv[1] * py + kinv[2];
        P_[1] = kinv[3] * px + kinv[4] * py + kinv[5];
        P_[2] = kinv[6] * px + kinv[7] * py + kinv[8];
        double nP = sqrt(P_[0] * P_[0] + P_[1] * P_[1] + P_[2] * P_[2]);
        P_[0] /= nP;
        P_[1] /= nP;
        P_[2] /= nP;

        M_ += 3;
        P_ += 3;
    }

    cv::Mat R_rod(3, 1, CV_64F);
    cv::Rodrigues(pose, R_rod);
    run(R_rod, Ps, Ms, 5, TUKEY_LM);
    cv::Rodrigues(R_rod, pose);

    return true;
}
int main() {
    TimePoint<Sec> time_point_sec(Sec(4));

    // implicit cast, no precision loss
    TimePoint<Ms> time_point_ms(time_point_sec);
    print_ms(time_point_ms); // 4000 ms

    time_point_ms = TimePoint<Ms>(Ms(5756));

    // explicit cast, needed when precision loss may happen
    // 5756 truncated to 5000
    time_point_sec = std::chrono::time_point_cast<Sec>(time_point_ms);
    print_ms(time_point_sec); // 5000 ms
}
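The snippet above depends on aliases and a helper it does not define; the definitions below are an assumption about what Sec, Ms, TimePoint, and print_ms look like, chosen so the example compiles and prints the commented values.

#include <chrono>
#include <iostream>

using Sec = std::chrono::seconds;
using Ms  = std::chrono::milliseconds;

template <class Duration>
using TimePoint = std::chrono::time_point<std::chrono::system_clock, Duration>;

template <class Duration>
void print_ms(const TimePoint<Duration>& tp) {
    // convert whatever duration the time point carries to milliseconds
    std::cout << std::chrono::duration_cast<Ms>(tp.time_since_epoch()).count()
              << " ms" << std::endl;
}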
bool PoseUpdaterRANSAC::updatePose(std::vector<Keypoint>& keypoints, cv::Mat& pose) {
    const int count = static_cast<int>(keypoints.size());

    cv::Mat Ps(count, 3, CV_64F);
    cv::Mat Ms(count, 3, CV_64F);

    //cv::Mat K(3,3,CV_64F,projector.k);
    double *kinv = projector.kinv;
    double scale = projector.scale;

    for (int k = 0; k < count; ++k) {
        Keypoint& kp = keypoints[k];

        // map point offset from the map center -> unit direction vector
        double mx = kp.map_point.x - map_center.x;
        double my = kp.map_point.y - map_center.y;
        double r = sqrt(scale * scale + my * my);
        double *M_ = Ms.ptr<double>(k);
        M_[0] = scale * sin(mx / scale) / r;
        M_[1] = my / r;
        M_[2] = scale * cos(mx / scale) / r;

        // image point -> unit-length ray (kinv * [px, py, 1])
        double *P_ = Ps.ptr<double>(k);
        double px = kp.img_point.x;
        double py = kp.img_point.y;
        //double pz = 1.0;
        P_[0] = kinv[0] * px + kinv[1] * py + kinv[2];
        P_[1] = kinv[3] * px + kinv[4] * py + kinv[5];
        P_[2] = kinv[6] * px + kinv[7] * py + kinv[8];
        double nP = sqrt(P_[0] * P_[0] + P_[1] * P_[1] + P_[2] * P_[2]);
        P_[0] /= nP;
        P_[1] /= nP;
        P_[2] /= nP;
    }

    cv::Mat R;
    cv::Mat mask;
    bool success = run(Ps, Ms, R, mask, reproj_threshold, confidence, max_iters);
    if (success) {
        fit(Ps, Ms, mask, pose);
    }
    return success;
}
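Both updatePose implementations above (PoseUpdaterGNLS2 and PoseUpdaterRANSAC) build the map-point direction with the same formula; the standalone sketch below spells out that mapping and why the result has unit norm. The struct and helper name are hypothetical, introduced only for illustration.

#include <cmath>

struct Vec3 { double x, y, z; };

// Map-plane offset (mx, my) from the map center -> unit direction, as in the
// loops above: with r = sqrt(scale^2 + my^2),
//   d = (scale*sin(mx/scale), my, scale*cos(mx/scale)) / r
// has norm 1 because scale^2*sin^2 + my^2 + scale^2*cos^2 = scale^2 + my^2 = r^2.
Vec3 mapOffsetToBearing(double mx, double my, double scale) {
    double r = std::sqrt(scale * scale + my * my);
    return { scale * std::sin(mx / scale) / r,
             my / r,
             scale * std::cos(mx / scale) / r };
}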
//Specifies the amount of time to wait for the synchronization object to be available (signaled).
//If INFINITE, Lock will wait until the object is signaled before returning.
//DWORD TimeOut_ms = INFINITE = 4294967295
int CMutex::Lock(int TimeOut_ms /* = 4294967295*/) {
    //std::condition_variable_any;
    // printf("Thread %.8x %.8x: Waiting to get lock \n", t);
    long int tid = getThreadId();
    //printf("%s:%d Mutex: %s waiting max timout %d thread %ld lockCount %d", __FILE__, __LINE__, name, TimeOut_ms, tid, lockCount);

    using Ms = std::chrono::milliseconds;
    bool success = mutex.try_lock_for(Ms(TimeOut_ms));
    if (success) {
        owner = tid;
        lockCount++;
    }
    //printf(" %s\n", success ? "Success" : "Failed");
    //printf(" %s lockCount %d \n", success ? "Success" : "Failed", lockCount);
    return success ? 1 : 0;
}
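A minimal standalone sketch of the same timeout pattern, assuming the mutex member used above is a std::timed_mutex (the CMutex class definition is not shown in the original):

#include <chrono>
#include <cstdio>
#include <mutex>

std::timed_mutex mutex; // stand-in for the CMutex member

int Lock(int TimeOut_ms) {
    using Ms = std::chrono::milliseconds;
    // wait at most TimeOut_ms milliseconds; report whether the lock was acquired
    bool success = mutex.try_lock_for(Ms(TimeOut_ms));
    return success ? 1 : 0;
}

int main() {
    if (Lock(100)) {
        std::printf("acquired\n");
        mutex.unlock();
    }
    return 0;
}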
void Camera::estimateExtrinsics(const std::vector<cv::Point3f>& objectPoints,
        const std::vector<cv::Point2f>& imagePoints, cv::Mat& rvec, cv::Mat& tvec) const {
    // lift the image points to normalized coordinates on the z = 1 plane
    std::vector<cv::Point2f> Ms(imagePoints.size());
    for (size_t i = 0; i < Ms.size(); ++i) {
        Eigen::Vector3d P;
        liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P);

        P /= P(2);

        Ms.at(i).x = P(0);
        Ms.at(i).y = P(1);
    }

    // assume unit focal length, zero principal point, and zero distortion
    cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec);
}
//#define DEBUG_MODE
int SingleSLAM::poseUpdate3D(bool largeErr) {
    propagateFeatureStates();

    //get the feature points corresponding to the map points
    std::vector<Track2DNode*> nodes;
    int num = getStaticMappedTrackNodes(nodes);
    if (num < 1) {
        double* cR = m_camPos.current()->R;
        double* cT = m_camPos.current()->t;
        CamPoseItem* camPos = m_camPos.add(currentFrame(), camId, cR, cT);
        updateCamParamForFeatPts(K, camPos);
        warn(
                "[camera id:%d]intra-camera pose update failed! less than five static map points (%d)",
                camId, num);
        //leaveBACriticalSection();
        //CoSLAM::ptr->pause();
        //enterBACriticalSection();
        return -1;
    }

    //choose the feature points for pose estimation
    std::vector<FeaturePoint*> featPts;
    chooseStaticFeatPts(featPts);

    std::vector<FeaturePoint*> mappedFeatPts;
    mappedFeatPts.reserve(nRowBlk * nColBlk * 2);
    for (size_t i = 0; i < featPts.size(); i++) {
        if (featPts[i]->mpt)
            mappedFeatPts.push_back(featPts[i]);
    }

    //get the 2D-3D corresponding points
    int n3D2Ds = mappedFeatPts.size();
    Mat_d ms(n3D2Ds, 2), Ms(n3D2Ds, 3), covs(n3D2Ds, 9);
    for (int i = 0; i < n3D2Ds; i++) {
        FeaturePoint* fp = mappedFeatPts[i];
        ms.data[2 * i] = fp->x;
        ms.data[2 * i + 1] = fp->y;

        Ms.data[3 * i] = fp->mpt->x;
        Ms.data[3 * i + 1] = fp->mpt->y;
        Ms.data[3 * i + 2] = fp->mpt->z;

        memcpy(covs.data + 9 * i, fp->mpt->cov, sizeof(double) * 9);
    }

    Mat_d R(3, 3), t(3, 1);
    double* cR = m_camPos.current()->R;
    double* cT = m_camPos.current()->t;

    IntraCamPoseOption opt;

    //test
    Mat_d old_errs(n3D2Ds, 1);
    //get reprojection error before pose update
    for (int i = 0; i < n3D2Ds; i++) {
        FeaturePoint* fp = mappedFeatPts[i];
        double m[2];
        project(K.data, cR, cT, fp->mpt->M, m);
        old_errs[i] = dist2(m, fp->m);
    }
    //end of test

    intraCamEstimate(K.data, cR, cT, Ms.rows, 0, Ms.data, ms.data,
            Param::maxErr, R.data, t.data, &opt);

    Mat_d new_errs(n3D2Ds, 1);
    for (int i = 0; i < n3D2Ds; i++) {
        FeaturePoint* fp = mappedFeatPts[i];
        double m[2];
        project(K.data, R, t, fp->mpt->M, m);
        new_errs[i] = dist2(m, fp->m);
    }

    //find outliers
    int numOut = 0;
    double errThres = largeErr ? 6.0 : 2.0;
    double rm[2], var[4], ivar[4];
    for (int i = 0; i < num; i++) {
        double* pM = nodes[i]->pt->mpt->M;
        double* pCov = nodes[i]->pt->mpt->cov;

        project(K, R, t, pM, rm);
        getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
        mat22Inv(var, ivar);
        double err = mahaDist2(rm, nodes[i]->pt->m, ivar);
        if (err < errThres) { //inlier
            nodes[i]->pt->reprojErr = err;
            seqTriangulate(K, R, t, nodes[i]->pt->m, pM, pCov,
                    Const::PIXEL_ERR_VAR);
            project(K, R, t, pM, rm);
            getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
            mat22Inv(var, ivar);
            err = mahaDist2(rm, nodes[i]->pt->m, ivar);
            //			if (err >= 1) {
            //				nodes[i]->pt->mpt->setUncertain();
            //				//test
            //				printf("poseUpdate:1\n");
            //			}
        } else { //outliers
            numOut++;
            double repErr = dist2(rm, nodes[i]->pt->m);
            nodes[i]->pt->reprojErr = repErr;
            nodes[i]->pt->mpt->setUncertain();
        }
    }
    CamPoseItem* camPos = m_camPos.add(currentFrame(), camId, R.data, t.data);
    updateCamParamForFeatPts(K, camPos);

    //	if (currentFrame() > 9)
    //		MyApp::bStop = true;

#ifdef DEBUG_MODE
    //if the number of outliers are too much (may due to some distant points)
    if (currentFrame() >= 76) {
        //test
        printf("f:%d,cam:%d : n3d2d:%d, num:%d, numOut:%d\n", currentFrame(),
                camId, n3D2Ds, num, numOut);

        char dirPath[1024];
        sprintf(dirPath, "/home/tsou/slam_posefailed/%s", MyApp::timeStr);
        mkdir(dirPath, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);

        savePGM(m_img, "/home/tsou/slam_posefailed/%s/%d_img_%d.pgm",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(Ms, "/home/tsou/slam_posefailed/%s/%d_pts3d_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(ms, "/home/tsou/slam_posefailed/%s/%d_pts2d_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(covs, "/home/tsou/slam_posefailed/%s/%d_cov3d_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(3, 3, K, "/home/tsou/slam_posefailed/%s/%d_K_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(old_errs, "/home/tsou/slam_posefailed/%s/%d_old_errs_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(new_errs, "/home/tsou/slam_posefailed/%s/%d_new_errs_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(3, 3, cR, "/home/tsou/slam_posefailed/%s/%d_oldR_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(3, 1, cT, "/home/tsou/slam_posefailed/%s/%d_oldT_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(R, "/home/tsou/slam_posefailed/%s/%d_R_%d.txt",
                MyApp::timeStr, currentFrame(), camId);
        writeMat(t, "/home/tsou/slam_posefailed/%s/%dT_%d.txt", MyApp::timeStr,
                currentFrame(), camId);

        //test
        logInfo("program paused for debug at camera %d!\n", currentFrame(), camId);
        leaveBACriticalSection();
        CoSLAM::ptr->pause();
        enterBACriticalSection();
    }
#endif
    return num;
}
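The inlier test above (and the one in fastPoseUpdate3D below) relies on the helpers mat22Inv and mahaDist2, which are not shown in this excerpt. Judging by their names and use, they invert the 2x2 projection covariance and return the squared Mahalanobis distance; the stand-ins below are an illustrative sketch, not the project's actual implementations.

#include <cmath>

// invert a 2x2 matrix stored row-major as {a, b, c, d}; assumes det != 0
void mat22InvSketch(const double var[4], double ivar[4]) {
    double det = var[0] * var[3] - var[1] * var[2];
    ivar[0] =  var[3] / det;
    ivar[1] = -var[1] / det;
    ivar[2] = -var[2] / det;
    ivar[3] =  var[0] / det;
}

// squared Mahalanobis distance (a - b)^T * ivar * (a - b) for 2D points
double mahaDist2Sketch(const double a[2], const double b[2], const double ivar[4]) {
    double dx = a[0] - b[0], dy = a[1] - b[1];
    return dx * (ivar[0] * dx + ivar[1] * dy) + dy * (ivar[2] * dx + ivar[3] * dy);
}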
int SingleSLAM::fastPoseUpdate3D() {
    propagateFeatureStates();

    //get the feature points corresponding to the map points
    std::vector<Track2DNode*> nodes;
    int num = getStaticMappedTrackNodes(nodes);
    if (num < 5) {
        repErr(
                "[camera id:%d]intra-camera pose update failed! less than five static map points (%d)",
                camId, num);
        return -1;
    }

    //choose the feature points for pose estimation
    std::vector<FeaturePoint*> featPts;
    int iChoose = chooseStaticFeatPts(featPts);
    //test
    logInfo("number of chosen features :%d\n", iChoose);

    std::vector<FeaturePoint*> mappedFeatPts;
    std::vector<FeaturePoint*> unmappedFeatPts;
    mappedFeatPts.reserve(nRowBlk * nColBlk * 2);
    unmappedFeatPts.reserve(nRowBlk * nColBlk * 2);

    for (size_t i = 0; i < featPts.size(); i++) {
        if (featPts[i]->mpt)
            mappedFeatPts.push_back(featPts[i]);
        else if (featPts[i]->preFrame)
            unmappedFeatPts.push_back(featPts[i]);
    }

    //get the 2D-3D corresponding points
    int n3D2Ds = mappedFeatPts.size();
    Mat_d ms(n3D2Ds, 2), Ms(n3D2Ds, 3), repErrs(n3D2Ds, 1);
    for (int i = 0; i < n3D2Ds; i++) {
        FeaturePoint* fp = mappedFeatPts[i];
        ms.data[2 * i] = fp->x;
        ms.data[2 * i + 1] = fp->y;

        Ms.data[3 * i] = fp->mpt->x;
        Ms.data[3 * i + 1] = fp->mpt->y;
        Ms.data[3 * i + 2] = fp->mpt->z;

        repErrs.data[i] = fp->reprojErr;
    }

    //get the 2D-2D corresponding points
    int n2D2Ds = unmappedFeatPts.size();
    Mat_d ums(n2D2Ds, 2), umspre(n2D2Ds, 2), Rpre(n2D2Ds, 9), tpre(n2D2Ds, 3);
    for (int i = 0; i < n2D2Ds; i++) {
        FeaturePoint* fp = unmappedFeatPts[i];

        ums.data[2 * i] = fp->x;
        ums.data[2 * i + 1] = fp->y;

        //travel back to the frame of the first appearance
        //while (fp->preFrame) {
        fp = fp->preFrame;
        //}
        assert(fp);

        umspre.data[2 * i] = fp->x;
        umspre.data[2 * i + 1] = fp->y;

        doubleArrCopy(Rpre.data, i, fp->cam->R, 9);
        doubleArrCopy(tpre.data, i, fp->cam->t, 3);
    }

    //estimate the camera pose using both 2D-2D and 3D-2D correspondences
    double R[9], t[3];
    double* cR = m_camPos.current()->R;
    double* cT = m_camPos.current()->t;

    //	//test
    //	logInfo("==============start of camId:%d=================\n", camId);
    //	logInfo("n3D2D:%d, n2D2D:%d\n", n3D2Ds, n2D2Ds);
    //
    //	write(K, "/home/tsou/data/K.txt");
    //	write(cR, 3, 3, "/home/tsou/data/%d_R0.txt", camId);
    //	write(cT, 3, 1, "/home/tsou/data/%d_T0.txt", camId);
    //	write(repErrs, "/home/tsou/data/%d_errs.txt", camId);
    //	write(Ms, "/home/tsou/data/%d_Ms.txt", camId);
    //	write(ms, "/home/tsou/data/%d_ms.txt", camId);
    //	write(Rpre, "/home/tsou/data/%d_Rpre.txt", camId);
    //	write(tpre, "/home/tsou/data/%d_tpre.txt", camId);
    //	write(umspre, "/home/tsou/data/%d_umspre.txt", camId);
    //	write(ums, "/home/tsou/data/%d_ums.txt", camId);
    //
    //	//test
    //	printMat(3, 3, cR);
    //	printMat(3, 1, cT);

    IntraCamPoseOption opt;
    double R_tmp[9], t_tmp[3];
    intraCamEstimate(K.data, cR, cT, n3D2Ds, repErrs.data, Ms.data, ms.data, 6,
            R_tmp, t_tmp, &opt);

    if (getCameraDistance(R_tmp, t_tmp, Rpre.data, tpre.data) > 1000) {
        opt.verboseLM = 1;
        intraCamEstimateEpi(K.data, R_tmp, t_tmp, n3D2Ds, repErrs.data,
                Ms.data, ms.data, n2D2Ds, 0, Rpre.data, tpre.data,
                umspre.data, ums.data, 6, R, t, &opt);
    } else {
        doubleArrCopy(R, 0, R_tmp, 9);
        doubleArrCopy(t, 0, t_tmp, 3);
    }

    //	printMat(3, 3, cR);
    //	printMat(3, 1, cT);
    //	printMat(3, 3, R);
    //	printMat(3, 1, cT);
    //	logInfo("==============end of camId:%d=================\n", camId);
    //	intraCamEstimate(K.data,cR,cT,n3D2Ds, repErrs.data,Ms.data,ms.data,6.0,R,t,&opt);

    // find outliers
    int numOut = 0;
    double rm[2], var[4], ivar[4];
    for (int i = 0; i < num; i++) {
        double* pM = nodes[i]->pt->mpt->M;
        double* pCov = nodes[i]->pt->mpt->cov;

        project(K, R, t, pM, rm);
        getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
        mat22Inv(var, ivar);
        double err = mahaDist2(rm, nodes[i]->pt->m, ivar);
        if (err < 1) { //inlier
            nodes[i]->pt->reprojErr = err;
            seqTriangulate(K, R, t, nodes[i]->pt->m, pM, pCov,
                    Const::PIXEL_ERR_VAR);
            project(K, R, t, pM, rm);
            getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
            mat22Inv(var, ivar);
            err = mahaDist2(rm, nodes[i]->pt->m, ivar);
            if (err >= 1) {
                nodes[i]->pt->mpt->setFalse();
            }
        } else { //outliers
            numOut++;
            double repErr = dist2(rm, nodes[i]->pt->m);
            nodes[i]->pt->reprojErr = repErr;
            nodes[i]->pt->mpt->setUncertain();
        }
    }
    CamPoseItem* camPos = m_camPos.add(currentFrame(), camId, R, t);
    updateCamParamForFeatPts(K, camPos);
    return num;
}