Пример #1
0
/**
 * Collect 2D-2D point correspondences between frames f1 and f2.
 * For every track alive over [f1, f2], the point observed at f1 is written
 * into pts1 and the point observed at f2 into pts2 (row-major x,y pairs).
 * @param tks  list of 2D feature tracks
 * @param f1   earlier frame index (must be < f2, otherwise repErr is raised)
 * @param f2   later frame index
 * @param pts1 output buffer for the f1 points (2 doubles per correspondence)
 * @param pts2 output buffer for the f2 points (2 doubles per correspondence)
 * @return number of correspondence pairs written
 */
int trackGetCorrespondence(const Track2DPtrList& tks , int f1 , int f2 , double* pts1 , double* pts2) {
	if (f1 >= f2)
		repErr("tracks_get_corresponds() : f1 < f2 is required.");

	Track2DPtrList::const_iterator iter;
	int cn = 0;
	for (iter = tks.begin(); iter != tks.end(); ++iter) {
		Track2D* tk = *iter;
		if (tk->f1 <= f1 && tk->f2 >= f2) {
			Track2DNode* p = tk->head.next;
			bool found1 = false, found2 = false;
			while (p) {
				if (p->f == f1) {
					pts1[cn * 2] = p->x;
					pts1[cn * 2 + 1] = p->y;
					found1 = true;
				} else if (p->f == f2) {
					pts2[cn * 2] = p->x;
					pts2[cn * 2 + 1] = p->y;
					found2 = true;
				}
				p = p->next;
			}
			// Only count the pair when the track actually has nodes at BOTH
			// frames; otherwise slot cn would keep stale/uninitialized data
			// (the original unconditionally incremented cn).
			if (found1 && found2)
				cn++;
		}
	}
	return cn;
}
Пример #2
0
// Intra-camera pose update from 3D-2D correspondences.
// Gathers the track nodes whose feature points are bound to static map
// points and packs them into measurement matrices. The 2D-2D part of the
// update is not implemented yet (see TODO below), so the routine currently
// only prepares data and returns.
// Returns 0 on success; -1 when fewer than five static map points are
// visible (note: repErr is invoked first and presumably throws/aborts —
// TODO confirm whether the -1 path is actually reachable).
int SingleSLAM::poseUpdate3D2D() {
	propagateFeatureStates();

//get the feature points corresponding to the map points
	std::vector<Track2DNode*> nodes;
	int num = getStaticMappedTrackNodes(nodes);
	// at least five 3D-2D correspondences are required for a pose estimate
	if (num < 5) {
		repErr(
				"[camera id:%d]intra-camera pose update failed! less than five static map points (%d)",
				camId, num);
		return -1;
	}

//get 2D-3D corresponding points
	// ms      : num x 2 image points
	// Ms      : num x 3 map points
	// repErrs : num x 1 reprojection errors copied from the feature points
	Mat_d ms(num, 2), Ms(num, 3), repErrs(num, 1);
	for (int i = 0; i < num; i++) {
		ms.data[2 * i] = nodes[i]->pt->x;
		ms.data[2 * i + 1] = nodes[i]->pt->y;

		Ms.data[3 * i] = nodes[i]->pt->mpt->x;
		Ms.data[3 * i + 1] = nodes[i]->pt->mpt->y;
		Ms.data[3 * i + 2] = nodes[i]->pt->mpt->z;

		repErrs.data[i] = nodes[i]->pt->reprojErr;
	}

//get 2D-2D corresponding points
//TODO
	return 0;
}
/**
 * Extract matched point pairs (at frames f1 and f2) from an array of tracks.
 * pts1/pts2 are resized to nTracks rows, filled with the matches found, and
 * then shrunk to the actual number of matches k.
 * @param nTracks    number of tracks in vecPTracks
 * @param vecPTracks array of 2D tracks
 * @param f1         earlier frame (must be < f2, otherwise repErr is raised)
 * @param f2         later frame
 * @param pts1       output k x 2 matrix of points at f1
 * @param pts2       output k x 2 matrix of points at f2
 * @return number of matched pairs k
 */
int getMatchingFromTracks(int nTracks, const Track2D* vecPTracks, int f1, int f2, Mat_d& pts1, Mat_d& pts2) {
	if (f1 >= f2)
		repErr("getMatchingFrameTracks - f1 >= f2\n");

	pts1.resize(nTracks, 2);
	pts2.resize(nTracks, 2);

	int k = 0;
	for (int i = 0; i < nTracks; i++) {
		// guard against empty tracks: dereferencing a null tail below
		// would be undefined behavior
		if (!vecPTracks[i].tail)
			continue;
		Track2DNode* p = vecPTracks[i].head.next;
		Track2DNode* tail = vecPTracks[i].tail->next;
		bool bFoundPts1 = false;
		while (p != tail) {
			if (p->f == f1) {
				pts1.data[2 * k] = p->x;
				pts1.data[2 * k + 1] = p->y;
				bFoundPts1 = true;
			}
			// only accept the f2 point after the f1 point has been seen,
			// so row k always holds a complete pair
			if (bFoundPts1 && p->f == f2) {
				pts2.data[2 * k] = p->x;
				pts2.data[2 * k + 1] = p->y;
				k++;
				break; //found a matching in the track, jump to the next track
			}
			p = p->next;
		}
	}
	// shrink to the number of matches actually found
	pts1.rows = k;
	pts2.rows = k;
	return k;
}
Пример #4
0
// Restore the display parameters previously written by GLScenePane::save().
// Returns true on success, false when the file cannot be opened or parsed
// (repErr raises an SL_Exception which is caught here).
bool GLScenePane::load(const char* file_path) {
	try {
		ifstream file(file_path);
		if (!file)
			repErr("cannot open '%s' to read!", file_path);

		// scalar parameters, in the exact order save() emits them
		file >> m_pointSize >> m_followCamId >> m_camView >> m_autoScale
				>> m_drawDynTrj >> m_nTrjLen >> m_camSize;

		// scene center and global scale
		file >> m_center[0] >> m_center[1] >> m_center[2];
		file >> m_scale;

		// 16 entries of the stored view matrix
		for (int k = 0; k < 16; ++k)
			file >> _matrix[k];
	} catch (SL_Exception& e) {
		cout << e.what();
		return false;
	}
	return true;
}
Пример #5
0
// Open the USB camera identified by camid, releasing any capture that is
// already open, configure it for 640x480 @ 30fps and cache the actual frame
// size. repErr is raised when the camera cannot be detected or reports an
// invalid geometry.
void USBCamReader::open() {
	if (videoCap)
		cvReleaseCapture(&videoCap);
	videoCap = cvCaptureFromCAM(camid);
	// Validate the handle BEFORE configuring it: the original called
	// cvSetCaptureProperty on a potentially null capture first.
	if (!videoCap) {
		repErr("ERROR: Fail to detect the USB camera \n");
	}
	cvSetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_WIDTH, 640);
	cvSetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_HEIGHT, 480);
	cvSetCaptureProperty(videoCap, CV_CAP_PROP_FPS, 30);
	// query the geometry the driver actually granted
	_w = (int)cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_WIDTH);
	_h = (int)cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_HEIGHT);
	if (_w <=0 || _h <= 0)
		repErr("ERROR: Fail to open the USB camera \n");

	_tm.tic();
}
Пример #6
0
// Build the internal bundle-adjustment structures from the given
// map-point -> feature-point associations. Only map points observed in more
// than one view are kept; for each kept point its 3D position and all of its
// 2D measurements (indexed by key camera pose) are recorded.
void RobustBundleRTS::parseInputs(
		std::map<MapPoint*, std::vector<const FeaturePoint*> >& mapPts) {
	// reset state possibly left over from a previous run
	mapPoints.clear();
	pt3Ds.clear();
	meas2Ds.clear();

	size_t validNum = 0;	// number of map points kept so far
	size_t featNum = 0;	// total number of feature observations kept

	std::map<MapPoint*, std::vector<const FeaturePoint*> >::iterator iter;
	for (iter = mapPts.begin(); iter != mapPts.end(); iter++) {
		size_t nfpts = iter->second.size();
		// a point needs at least two observations to constrain the bundle
		if (nfpts > 1) {
			//add map point, maintaining the bidirectional point<->index maps
			MapPoint* mpt = iter->first;
			mapPt2ind[mpt] = validNum;
			int2MapPt[validNum] = mpt;

			mapPoints.push_back(mpt);
			pt3Ds.push_back(Point3d(mpt->x, mpt->y, mpt->z));

			validNum++;
			featNum += nfpts;

			// measurement slot for this map point
			meas2Ds.push_back(std::vector<Meas2D>());

			//add corresponding feature points
			// vecFeatPts[i] holds the observation seen by key pose i (or null)
			std::vector<const FeaturePoint*> vecFeatPts;
			vecFeatPts.resize(keyCamPoses.size(), 0);
			for (size_t i = 0; i < nfpts; i++) {
				const FeaturePoint* fpt = iter->second[i];
				// map (frame, camera) to the index of the key camera pose
				std::pair<int, int> camFrameId = std::make_pair(fpt->f,
						fpt->camId);
				if (cameraInd.count(camFrameId) == 0)
					repErr(
							"RobustBundleRTS::parseInputs - cannot find the corresponding camera (frame:%d, camId:%d)",
							fpt->f, fpt->camId);
				else {
					int camId = cameraInd[camFrameId];
					vecFeatPts[camId] = fpt;
				}
			}
			// emit the measurements in key-pose order
			for (size_t i = 0; i < keyCamPoses.size(); i++) {
				if (vecFeatPts[i]) {
					meas2Ds.back().push_back(
							Meas2D(i, vecFeatPts[i]->x, vecFeatPts[i]->y));
				}
			}
			//test
			//logInfo("%d : %d\n", validNum - 1, nfpts);
		}
	}
	//test
//	logInfo("total camera poses:%d\n", keyCamPoses.size());
//	logInfo("total map points:%d\n", validNum);
//	logInfo("total feature points:%d\n", featNum);
}
// Append a new key frame for the given frame index to the list.
// Frames must be added in non-decreasing order; the new node becomes the
// tail, which is also returned.
KeyFrame* KeyFrameList::add(int frame) {
	// validate before allocating anything
	if (frame < 0)
		repErr("KeyFrmLst::add() error!");
	if (tail != 0 && frame < tail->f)
		repErr("KeyFrmLst::add() frame (%d) < tail->cam->f(%d)\n", frame, tail->f);

	KeyFrame* newFrame = new KeyFrame(frame);
	if (tail == 0) {
		// first element: hang it off the sentinel head
		head.next = newFrame;
	} else {
		// append behind the current tail and back-link it
		tail->next = newFrame;
		newFrame->prev = tail;
	}
	tail = newFrame;
	num++;
	return tail;
}
// Open the video file referenced by filePath, replacing any capture that is
// already open, and cache the reported frame geometry.
// Returns false when the file cannot be opened or reports invalid dimensions.
bool AVIReader::open() {
	// drop a leftover capture from a previous call
	if (videoCap)
		cvReleaseCapture(&videoCap);

	videoCap = cvCaptureFromFile(filePath.c_str());
	if (!videoCap){
		repErr("Cannot open %s\n", filePath.c_str());
		return false;
	}

	// cache the frame geometry reported by the decoder
	_w = (int) cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_WIDTH);
	_h = (int) cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_HEIGHT);

	//assert(_w > 0 && _h > 0);
	if (_w <= 0 || _h <= 0){
			repErr("ERROR: Fail to open the USB camera \n");
			return false;
	}

	_timestamp = 0;
	return true;
}
/**
 * Append a key pose for frame f with camera pose cam_ to the list.
 * Poses must arrive in non-decreasing frame order; the new node becomes
 * the tail, which is returned.
 * Fix: diagnostics previously said "KeyFrmLst::add()" — the wrong class.
 */
KeyPose* KeyPoseList::add(int f, CamPoseItem* cam_) {
	if (!cam_)
		repErr("KeyPoseList::add() error!");
	if (tail == 0) {
		// first pose: hang it off the sentinel head
		KeyPose* pose = new KeyPose(f, cam_);
		head.next = pose;
		tail = pose;
	} else {
		// enforce chronological insertion order
		if (cam_->f < tail->cam->f)
			repErr("KeyPoseList::add() cam_->f < tail->cam->f");
		KeyPose* pose = new KeyPose(f, cam_);

		tail->next = pose;
		pose->pre = tail;
		tail = pose;
	}
	num++;
	return tail;
}
Пример #10
0
// Open the video file referenced by filePath, releasing any capture that is
// already open, and cache the frame geometry. repErr is raised when the
// file cannot be opened.
void AVIReader::open() {
	if (videoCap)
		cvReleaseCapture(&videoCap);

	videoCap = cvCaptureFromFile(filePath.c_str());
	if (!videoCap)
		repErr("Cannot open %s\n", filePath.c_str());

	// query and cache the decoder-reported frame size
	int frameW = (int) cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_WIDTH);
	int frameH = (int) cvGetCaptureProperty(videoCap, CV_CAP_PROP_FRAME_HEIGHT);
	_w = frameW;
	_h = frameH;
	assert(_w > 0 && _h > 0);

	_timestamp = 0;
}
/**
 * Register a 3D map point for bundle adjustment: copy its coordinates into
 * the Ms buffer and remember its index. repErr is raised when the point
 * capacity is exhausted.
 * @return the index assigned to the point.
 * Fix: the error message previously said "maxNumCams" although the check
 * compares against maxNumPts.
 */
int BundleRTS::add3DPoint(MapPoint* pt3d) {
    if (numPts >= maxNumPts)
        repErr("BundleRTS::add3DPoint - error ! numPts (%d) >= maxNumPts (%d)",
               numPts, maxNumPts);
    // copy the point's 3D coordinates into slot numPts of Ms
    double* pMs = Ms.data + 3 * numPts;
    memcpy(pMs, pt3d->M, sizeof(double) * 3);

    mapPoints.push_back(pt3d);
    mapPointInd[pt3d] = numPts;
    int nRet = numPts;
    numPts++;
    return nRet;
}
Пример #12
0
int trackGetCorrespondenceNum(const Track2DPtrList& tks , int f1 , int f2) {
	if (f1 >= f2)
		repErr("tracks_get_corresponds_num() : f1 < f2 is required.");
	//get the number of valid tracks
	Track2DPtrList::const_iterator iter;
	int cn = 0;
	for (iter = tks.begin(); iter != tks.end(); ++iter) {
		Track2D* tk = *iter;
		if (tk->f1 <= f1 && tk->f2 >= f2)
			cn++;
	}
	return cn;
}
Пример #13
0
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//for debug
// Dump camera poses to a text file, one pose per line:
// "<frame>\t<R[0..8]> <t[0] t[1] t[2]>", walking backwards from the current
// pose down to (and including) startFrame.
void SingleSLAM::saveCamPoses(const char* filePath, int startFrame) {
	FILE* fp = fopen(filePath, "w");
	if (!fp)
		repErr("SingleSLAM::saveCamPoses - cannot open '%s'!", filePath);

	CamPoseItem* cam = m_camPos.current();
	while (cam && cam->f >= startFrame) {
		fprintf(fp, "%d\t", cam->f);
		// 3x3 rotation matrix, row-major
		for (int k = 0; k < 9; k++)
			fprintf(fp, "%g ", cam->R[k]);
		// translation vector
		fprintf(fp, "%g %g %g\n", cam->t[0], cam->t[1], cam->t[2]);
		cam = cam->pre;
	}
	fclose(fp);
}
/**
 * Append an existing KeyPose node to the list (the list takes over linkage,
 * not ownership semantics beyond that). Poses must arrive in non-decreasing
 * frame order. Returns the new tail, or 0 when pose is null.
 * Fix: diagnostics previously referred to "KeyPoseList::add()" — the wrong
 * function name.
 */
KeyPose* KeyPoseList::push_back(KeyPose* pose) {
	if (!pose)
	{
		printf("KeyPoseList::push_back() returned - !pose\n");
		return 0;
	}
	if (tail == 0) {
		// first element: hang it off the sentinel head
		head.next = pose;
		tail = pose;
	} else {
		// enforce chronological insertion order
		if (pose->cam->f < tail->cam->f)
			repErr("KeyPoseList::push_back() returned - pose->cam->f < tail->cam->f");

		tail->next = pose;
		pose->pre = tail;
		tail = pose;
	}
	num++;
	return tail;
}
Пример #15
0
// Persist the current display parameters to a text file in the order that
// GLScenePane::load() reads them back. repErr is raised when the file
// cannot be opened for writing.
void GLScenePane::save(const char* file_path) {
	ofstream file(file_path);
	if (!file)
		repErr("cannot open '%s' to write!", file_path);

	// scalar parameters, one per line
	file << m_pointSize << endl << m_followCamId << endl << m_camView << endl
			<< m_autoScale << endl << m_drawDynTrj << endl << m_nTrjLen << endl
			<< m_camSize << endl;

	// scene center and global scale
	file << m_center[0] << " " << m_center[1] << " " << m_center[2] << endl;
	file << m_scale << endl;

	// 16 entries of the current view matrix, one per line
	for (int k = 0; k < 16; ++k)
		file << _matrix[k] << endl;

	cout << "display parameters have been saved in '" << file_path << "'."
			<< endl;
}
void BundleRTS::addKeyCamera(const double* KMat, CamPoseItem* camPos) {
    if (numCams > maxNumCams)
        repErr("BundleRTS::addCamera - error! numCams (%d) > maxNumCams (%d)",
               numCams, maxNumCams);

    int frame = camPos->f;
    int camId = camPos->camId;

    cameraInd[std::make_pair(frame, camId)] = numCams;

    double* pKs = Ks.data + 9 * numCams;
    double* pRs = Rs.data + 9 * numCams;
    double* pTs = Ts.data + 3 * numCams;

    memcpy(pKs, KMat, sizeof(double) * 9);
    memcpy(pRs, camPos->R, sizeof(double) * 9);
    memcpy(pTs, camPos->t, sizeof(double) * 3);

    numCams++;

    keyCamPoses.push_back(camPos);
}
/**
 * Record one 2D observation of map point pt3d. The 3D point is registered
 * on first sight; the measurement is stored at (point index, camera index)
 * in the packed ms buffer and flagged in vmask.
 * Fix: numMeas was incremented unconditionally, counting observations whose
 * camera could not be resolved and were therefore never stored; it is now
 * incremented only when the measurement is actually recorded.
 */
void BundleRTS::_add2DPoint(MapPoint* pt3d, const FeaturePoint* featPt) {
    int ptInd;
    // register the 3D point on first sight, otherwise reuse its index
    if (mapPointInd.count(pt3d) == 0)
        ptInd = add3DPoint(pt3d);
    else
        ptInd = mapPointInd[pt3d];

    std::pair<int, int> camFrameId = std::make_pair(featPt->f, featPt->camId);
    if (cameraInd.count(camFrameId) == 0)
        repErr(
            "BundleRTS::add2DPoint - cannot find the corresponding camera (frame:%d, camId:%d)",
            featPt->f, featPt->camId);
    else {
        //add this feature point
        int camInd = cameraInd[camFrameId];
        int ind = ptInd * numCams + camInd;
        ms.data[2 * ind] = featPt->x;
        ms.data[2 * ind + 1] = featPt->y;
        vmask.data[ind] = 1;
        // count the measurement only when it was actually stored
        numMeas++;
    }
}
Пример #18
0
// Collect 2D-2D correspondences between frames f1 and f2 from tracks that
// are (a) alive over [f1, f2], (b) not yet associated with a map point and
// (c) contain no dynamic feature point. The matched nodes at f1 and f2 are
// appended to nodes1/nodes2 (kept index-aligned — a pair is pushed only
// when both endpoints exist). Returns the number of pairs added.
int SingleSLAM::getUnMappedAndTrackedCorrespondence(int f1, int f2,
		std::vector<Track2DNode*>& nodes1, std::vector<Track2DNode*>& nodes2) {
	if (f1 >= f2)
		repErr("tracks_get_corresponds() : f1 < f2 is required.");

	int cn = 0;
	for (int i = 0; i < m_tracker.m_nMaxCorners; i++) {
		Track2D& tk = m_tracker.m_tks[i];
		// track must span [f1, f2] and its latest point must be unmapped
		if (tk.f1 <= f1 && tk.f2 >= f2 && !tk.tail->pt->mpt) {
			Track2DNode* p = tk.head.next;
			bool bStatic = true;
			// reject the whole track if any of its points was marked dynamic
			while (p && bStatic) {
				if (p->pt->type == TYPE_FEATPOINT_DYNAMIC) {
					bStatic = false;
					break;
				}
				p = p->next;
			}
			if (bStatic && tk.head.next) {
				// locate the nodes observed exactly at f1 and f2
				Track2DNode* head = 0;
				Track2DNode* tail = 0;
				for (p = tk.head.next; p; p = p->next) {
					if (p->f == f1)
						head = p;
					else if (p->f == f2)
						tail = p;
				}
				// record only complete pairs to keep nodes1/nodes2 aligned
				if (head && tail) {
					nodes1.push_back(head);
					nodes2.push_back(tail);
					cn++;
				}
			}
		}
	}
	return cn;
}
Пример #19
0
/**
 * Dump feature tracks (of at least minLen points, non-dynamic) to a text
 * file. Each track is written as one header line — "1 X Y Z" when its tail
 * is bound to a certainly-static map point, "0 0 0 0" otherwise — followed
 * by one line of "<frame> <x> <y>" triplets in chronological order, starting
 * no earlier than startFrame.
 * Fix: the error message previously said "saveCamPoses".
 */
void SingleSLAM::saveFeatureTracks(const char* filePath, int minLen,
		int startFrame) {
	FILE* fp = fopen(filePath, "w");
	if (!fp)
		repErr("SingleSLAM::saveFeatureTracks -- cannot open '%s'!", filePath);

	for (int i = 0; i < m_tracker.m_nMaxCorners; i++) {
		Track2D& tk = m_tracker.m_tks[i];
		// skip empty/short tracks
		if (tk.empty() || tk.length() < minLen)
			continue;

		// skip tracks whose latest point was labeled dynamic
		if (tk.tail->pt->type == TYPE_FEATPOINT_DYNAMIC)
			continue;

		Track2DNode* node = tk.tail;
		MapPoint* mpt = node->pt->mpt;
		if (mpt && mpt->isCertainStatic())
			fprintf(fp, "1 %g %g %g\n", mpt->M[0], mpt->M[1], mpt->M[2]);
		else
			fprintf(fp, "0 0 0 0\n");
		//output the feature points

		// walk back through the frames, then emit in forward (chronological)
		// order by iterating the collected points in reverse
		std::vector<FeaturePoint*> tmpFeatPts;
		for (FeaturePoint* featPt = node->pt; featPt && featPt->f >= startFrame;
				featPt = featPt->preFrame) {
			tmpFeatPts.push_back(featPt);
		}

		for (std::vector<FeaturePoint*>::reverse_iterator iter =
				tmpFeatPts.rbegin(); iter != tmpFeatPts.rend(); iter++) {
			FeaturePoint* featPt = *iter;
			fprintf(fp, "%d %g %g ", featPt->f, featPt->x, featPt->y);
		}
		fprintf(fp, "\n");
	}
	fclose(fp);
}
Пример #20
0
/**
 * Collect the track nodes observed at frames f1 and f2 for every track that
 * is alive over [f1, f2]. Matched nodes are appended pairwise to
 * nodes1/nodes2; returns the number of pairs added.
 * Fix: the original pushed to nodes1 and nodes2 independently inside the
 * scan, so a track with a node at only one of the two frames desynchronized
 * the two vectors (and cn over-counted). A pair is now pushed only when both
 * endpoints exist — matching SingleSLAM::getUnMappedAndTrackedCorrespondence.
 */
int trackGetCorrespondence(const Track2DPtrList& tks , int f1 , int f2 , std::vector<Track2DNode*>& nodes1 , std::vector<
		Track2DNode*>& nodes2) {
	if (f1 >= f2)
		repErr("tracks_get_corresponds() : f1 < f2 is required.");
	Track2DPtrList::const_iterator iter;

	int cn = 0;
	for (iter = tks.begin(); iter != tks.end(); ++iter) {
		Track2D* tk = *iter;
		if (tk->f1 <= f1 && tk->f2 >= f2) {
			// locate the nodes observed exactly at f1 and f2
			Track2DNode* n1 = 0;
			Track2DNode* n2 = 0;
			for (Track2DNode* p = tk->head.next; p; p = p->next) {
				if (p->f == f1)
					n1 = p;
				else if (p->f == f2)
					n2 = p;
			}
			// record only complete pairs to keep nodes1/nodes2 index-aligned
			if (n1 && n2) {
				nodes1.push_back(n1);
				nodes2.push_back(n2);
				cn++;
			}
		}
	}
	return cn;
}
Пример #21
0
/**
 * Greedily order cameras by pairwise "distance" (e.g. number of shared
 * views): seed the chain with the pair of maximal distMat value, then
 * repeatedly attach the best remaining camera at whichever end (head/tail)
 * offers the larger value. repErr is raised when some camera could not be
 * connected to the chain.
 * @param camNum  number of cameras
 * @param distMat camNum x camNum row-major score matrix (upper triangle used
 *                for seeding)
 * @param order   output: selected camera indices in chain order
 * Fix: the attach step used `if (head > tail) ... else if (head < tail)`,
 * so a tie (maxValHead == maxValTail > 0) executed neither branch, left the
 * loop state unchanged and spun forever. Ties now attach at the tail.
 */
void selectCameraOrder(int camNum, const int* distMat, Mat_i& order) {
	int maxVal = 0;
	int iMax = -1, jMax = -1;
	const int* pDistMat = distMat;
	// find the max value in the distMat at (iMax, jMax)
	for (int i = 0; i < camNum; i++) {
		for (int j = i; j < camNum; j++) {
			if (pDistMat[j] > maxVal) {
				maxVal = pDistMat[j];
				iMax = i;
				jMax = j;
			}
		}
		pDistMat += camNum;
	}

	// flag[i] == 1 <=> camera i is already part of the chain
	Mat_uc flag(camNum, 1);
	flag.fill(0);
	flag[iMax] = 1;
	flag[jMax] = 1;

	std::list<int> cams;
	cams.push_back(iMax);
	cams.push_back(jMax);

	while (maxVal > 0) {
		int iHead = cams.front();
		int iTail = cams.back();

		// best unused camera to attach at the head of the chain
		int iMaxHead = -1;
		int maxValHead = 0;
		pDistMat = distMat + camNum * iHead;
		for (int j = 0; j < camNum; j++) {
			if (flag[j] > 0)
				continue;
			else if (maxValHead < pDistMat[j]) {
				maxValHead = pDistMat[j];
				iMaxHead = j;
			}
		}

		// best unused camera to attach at the tail of the chain
		int iMaxTail = -1;
		int maxValTail = 0;
		pDistMat = distMat + camNum * iTail;
		for (int j = 0; j < camNum; j++) {
			if (flag[j] > 0)
				continue;
			else if (maxValTail < pDistMat[j]) {
				maxValTail = pDistMat[j];
				iMaxTail = j;
			}
		}
		// no remaining camera connects to either end
		if (iMaxHead < 0 && iMaxTail < 0) {
			break;
		}

		if (maxValHead > maxValTail) {
			cams.push_front(iMaxHead);
			flag[iMaxHead] = 1;
		} else {
			// tail wins or tie: attach at the tail (iMaxTail is valid here,
			// since maxValTail >= maxValHead and at least one candidate exists)
			cams.push_back(iMaxTail);
			flag[iMaxTail] = 1;
		}

	}

	order.resize(cams.size(), 1);

	int k = 0;
	std::list<int>::iterator iter;
	for (iter = cams.begin(); iter != cams.end(); iter++) {
		order.data[k++] = *iter;
	}
	order.rows = k;
	if (order.rows != camNum) {
		repErr("selectCameraOrder - not all camera have the intersection of views");
	}
}
Пример #22
0
// Fast intra-camera pose update combining 3D-2D (mapped) and 2D-2D
// (unmapped but tracked) correspondences.
// Pipeline: propagate feature states -> gather static mapped track nodes ->
// choose a spatially distributed subset of static features -> estimate the
// pose (epipolar-augmented when the plain estimate jumps too far) ->
// reclassify map points as inlier/uncertain/false by reprojection error ->
// append the new camera pose. Returns the number of static mapped nodes, or
// -1 when fewer than five are available (note: repErr is called first and
// presumably throws — TODO confirm the -1 path is reachable).
int SingleSLAM::fastPoseUpdate3D() {
	propagateFeatureStates();
//get the feature points corresponding to the map points
	std::vector<Track2DNode*> nodes;
	int num = getStaticMappedTrackNodes(nodes);
	if (num < 5) {
		repErr(
				"[camera id:%d]intra-camera pose update failed! less than five static map points (%d)",
				camId, num);
		return -1;
	}

//choose the feature points for pose estimation
	std::vector<FeaturePoint*> featPts;
	int iChoose = chooseStaticFeatPts(featPts);
//test
	logInfo("number of chosen features :%d\n", iChoose);

	// split the chosen features into mapped (3D-2D) and unmapped-but-tracked
	// (2D-2D) sets
	std::vector<FeaturePoint*> mappedFeatPts;
	std::vector<FeaturePoint*> unmappedFeatPts;

	mappedFeatPts.reserve(nRowBlk * nColBlk * 2);
	unmappedFeatPts.reserve(nRowBlk * nColBlk * 2);

	for (size_t i = 0; i < featPts.size(); i++) {
		if (featPts[i]->mpt)
			mappedFeatPts.push_back(featPts[i]);
		else if (featPts[i]->preFrame)
			unmappedFeatPts.push_back(featPts[i]);
	}

	//get the 2D-3D corresponding points
	int n3D2Ds = mappedFeatPts.size();
	Mat_d ms(n3D2Ds, 2), Ms(n3D2Ds, 3), repErrs(n3D2Ds, 1);
	for (int i = 0; i < n3D2Ds; i++) {
		FeaturePoint* fp = mappedFeatPts[i];
		ms.data[2 * i] = fp->x;
		ms.data[2 * i + 1] = fp->y;

		Ms.data[3 * i] = fp->mpt->x;
		Ms.data[3 * i + 1] = fp->mpt->y;
		Ms.data[3 * i + 2] = fp->mpt->z;

		repErrs.data[i] = fp->reprojErr;
	}

	//get the 2D-2D corresponding points
	// for each unmapped point, also record its observation in the previous
	// frame together with that frame's camera pose (Rpre/tpre)
	int n2D2Ds = unmappedFeatPts.size();
	Mat_d ums(n2D2Ds, 2), umspre(n2D2Ds, 2), Rpre(n2D2Ds, 9), tpre(n2D2Ds, 3);
	for (int i = 0; i < n2D2Ds; i++) {
		FeaturePoint* fp = unmappedFeatPts[i];

		ums.data[2 * i] = fp->x;
		ums.data[2 * i + 1] = fp->y;

		//travel back to the frame of the first appearance
		//while (fp->preFrame) {
		// NOTE(review): only one step back is taken — the loop above is
		// deliberately commented out, so "first appearance" no longer applies
		fp = fp->preFrame;
		//}
		assert(fp);

		umspre.data[2 * i] = fp->x;
		umspre.data[2 * i + 1] = fp->y;

		doubleArrCopy(Rpre.data, i, fp->cam->R, 9);
		doubleArrCopy(tpre.data, i, fp->cam->t, 3);
	}

//estimate the camera pose using both 2D-2D and 3D-2D correspondences
	double R[9], t[3];
	double* cR = m_camPos.current()->R;
	double* cT = m_camPos.current()->t;

//	//test
//	logInfo("==============start of camId:%d=================\n", camId);
//	logInfo("n3D2D:%d, n2D2D:%d\n", n3D2Ds, n2D2Ds);
//
//	write(K, "/home/tsou/data/K.txt");
//	write(cR, 3, 3, "/home/tsou/data/%d_R0.txt", camId);
//	write(cT, 3, 1, "/home/tsou/data/%d_T0.txt", camId);
//	write(repErrs, "/home/tsou/data/%d_errs.txt", camId);
//	write(Ms, "/home/tsou/data/%d_Ms.txt", camId);
//	write(ms, "/home/tsou/data/%d_ms.txt", camId);
//	write(Rpre, "/home/tsou/data/%d_Rpre.txt", camId);
//	write(tpre, "/home/tsou/data/%d_tpre.txt", camId);
//	write(umspre, "/home/tsou/data/%d_umspre.txt", camId);
//	write(ums, "/home/tsou/data/%d_ums.txt", camId);
//
//	//test
//	printMat(3, 3, cR);
//	printMat(3, 1, cT);

	// first pass: 3D-2D only; if the resulting pose jumps implausibly far
	// from the previous one, redo with the epipolar (2D-2D) constraints added
	IntraCamPoseOption opt;
	double R_tmp[9], t_tmp[3];
	intraCamEstimate(K.data, cR, cT, n3D2Ds, repErrs.data, Ms.data, ms.data, 6,
			R_tmp, t_tmp, &opt);

	if (getCameraDistance(R_tmp, t_tmp, Rpre.data, tpre.data) > 1000) {
		opt.verboseLM = 1;
		intraCamEstimateEpi(K.data, R_tmp, t_tmp, n3D2Ds, repErrs.data, Ms.data,
				ms.data, n2D2Ds, 0, Rpre.data, tpre.data, umspre.data, ums.data,
				6, R, t, &opt);
	} else {
		doubleArrCopy(R, 0, R_tmp, 9);
		doubleArrCopy(t, 0, t_tmp, 3);
	}

//	printMat(3, 3, cR);
//	printMat(3, 1, cT);
//	printMat(3, 3, R);
//	printMat(3, 1, cT);
//	logInfo("==============end of camId:%d=================\n", camId);
//	intraCamEstimate(K.data,cR,cT,n3D2Ds, repErrs.data,Ms.data,ms.data,6.0,R,t,&opt);
//	find outliers

	// classify every static mapped node by Mahalanobis reprojection error
	// under the new pose: inliers are re-triangulated (and demoted to false
	// if they no longer reproject), outliers are marked uncertain
	int numOut = 0;
	double rm[2], var[4], ivar[4];
	for (int i = 0; i < num; i++) {
		double* pM = nodes[i]->pt->mpt->M;
		double* pCov = nodes[i]->pt->mpt->cov;
		project(K, R, t, pM, rm);
		getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
		mat22Inv(var, ivar);
		double err = mahaDist2(rm, nodes[i]->pt->m, ivar);
		if (err < 1) { //inlier
			nodes[i]->pt->reprojErr = err;
			seqTriangulate(K, R, t, nodes[i]->pt->m, pM, pCov,
					Const::PIXEL_ERR_VAR);
			project(K, R, t, pM, rm);
			getProjectionCovMat(K, R, t, pM, pCov, var, Const::PIXEL_ERR_VAR);
			mat22Inv(var, ivar);
			err = mahaDist2(rm, nodes[i]->pt->m, ivar);
			// triangulation made it worse: invalidate the map point
			if (err >= 1) {
				nodes[i]->pt->mpt->setFalse();
			}
		} else {
			//outliers
			numOut++;
			double repErr = dist2(rm, nodes[i]->pt->m);
			nodes[i]->pt->reprojErr = repErr;
			nodes[i]->pt->mpt->setUncertain();
		}
	}
	// commit the new pose and refresh per-feature camera parameters
	CamPoseItem* camPos = m_camPos.add(currentFrame(), camId, R, t);
	updateCamParamForFeatPts(K, camPos);

	return num;
}
// Run sparse bundle adjustment (SBA) over the collected cameras and points.
// Packs camera intrinsics/extrinsics and point positions into m_paramVec in
// the layout sba_motstr_levmar_x expects, then invokes the solver.
// @param nPtsCon  number of points held constant during optimization
// @param nCamsCon number of cameras held constant during optimization
// @param maxIter  maximum LM iterations
void BundleRTS::run(int nPtsCon, int nCamsCon, int maxIter) {
    refineInput(_mapPts);

    numPtsCon = nPtsCon;
    numCamsCon = nCamsCon;

    compressMeasurements();

    const int cnp = 11; //5:intrinsic parameters 6:extrinsic parameters
    const int pnp = 3;
    const int mnp = 2;

    m_globs.cnp = cnp;
    m_globs.pnp = pnp;
    m_globs.mnp = mnp;

    // (re)allocate the reference quaternions, one per camera
    if (m_globs.rot0params) {
        delete[] m_globs.rot0params;
    }
    m_globs.rot0params = new double[FULLQUATSZ * numCams];

    //set initial camera parameters
    for (int i = 0; i < numCams; ++i) {
        mat2quat(Rs + 9 * i, m_globs.rot0params + 4 * i);
    }

    m_globs.intrcalib = 0;
    m_globs.nccalib = 5;

    m_globs.camparams = 0;
    m_globs.ptparams = 0;

    /* call sparse LM routine */
    double opts[SBA_OPTSSZ];
    opts[0] = SBA_INIT_MU * 1E-4;
    opts[1] = SBA_STOP_THRESH;
    opts[2] = SBA_STOP_THRESH;
    opts[3] = 0; //0.05 * numMeas; // uncomment to force termination if the average reprojection error drops below 0.05
    opts[4] = 1E-16; // uncomment to force termination if the relative reduction in the RMS reprojection error drops below 1E-05

    if (m_paramVec)
        delete[] m_paramVec;
    m_paramVec = new double[numCams * cnp + numPts * pnp];

    // pack per-camera parameters: 5 intrinsics (fx, cx, cy, aspect, skew),
    // then 3 rotation offsets (zero, relative to rot0params) and translation
    double * pParamVec = m_paramVec;
    double* pKs = Ks.data;
    double* pTs = Ts.data;
    for (int i = 0; i < numCams; ++i) {
        pParamVec[0] = pKs[0];
        pParamVec[1] = pKs[2];
        pParamVec[2] = pKs[5];
        pParamVec[3] = pKs[4] / pKs[0];
        pParamVec[4] = pKs[1];

        pParamVec[5] = 0;
        pParamVec[6] = 0;
        pParamVec[7] = 0;
        pParamVec[8] = pTs[0];
        pParamVec[9] = pTs[1];
        pParamVec[10] = pTs[2];

        pParamVec += cnp;
        pKs += 9;
        pTs += 3;
    }
    // point parameters follow the camera block
    double* pParamPoints = m_paramVec + numCams * cnp;
    memcpy(pParamPoints, Ms.data, numPts * 3 * sizeof(double));

    double sbaInfo[SBA_INFOSZ];
    if (sba_motstr_levmar_x(numPts, numPtsCon, numCams, numCamsCon, vmask,
                            m_paramVec, cnp, pnp, ms.data, 0, mnp, img_projsKRTS_x,
                            img_projsKRTS_jac_x, (void *) (&m_globs), maxIter, 0, opts,
                            sbaInfo) == SBA_ERROR) {
        //for debug
        //save the bundle data for debug


        repErr("bundle adjustment failed!\n");
    }
    //test
    logInfo(
        "initial error:%lf, final error:%lf #iterations:%lf stop reason:%lf\n",
        sqrt(sbaInfo[0] / numMeas), sqrt(sbaInfo[1] / numMeas), sbaInfo[5],
        sbaInfo[6]);
}