Code Example #1
void NewMapPtsNCC::output() {
	decidePointType();
	for (size_t i = 0; i < newMapPts.size(); i++) {
		MapPoint* mpt = newMapPts[i];
		for (int c = 0; c < numCams; c++) {
			int camId = m_camGroup.camIds[c];
			if (mpt->pFeatures[camId]) {
				for (FeaturePoint* fp = mpt->pFeatures[camId]; fp->nextFrame;
						fp = fp->nextFrame) {
					fp->mpt = mpt;
					mpt->lastFrame =
							fp->f > mpt->lastFrame ? fp->f : mpt->lastFrame;

					mpt->nccBlks[camId].computeScaled(
							pCoSLAM->slam[camId].m_smallImg,
							pCoSLAM->slam[camId].m_smallScale, fp->x, fp->y);

					// clamp to the last valid pixel index (w-1, h-1)
					int x = max(0, min((int) fp->x, pCoSLAM->slam[camId].m_img.w - 1));
					int y = max(0, min((int) fp->y, pCoSLAM->slam[camId].m_img.h - 1));

					mpt->setColor(pCoSLAM->slam[camId].m_rgb(x,y));
				}
			}
		}
		if (mpt->lastFrame < pCoSLAM->curFrame) {
			pCoSLAM->actMapPts.add(mpt);
		} else {
			pCoSLAM->curMapPts.add(mpt);
		}
	}
}
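The clamp above caps the sampled pixel at the last valid index (w - 1, h - 1) before reading the RGB image. A reusable version of that guard might look like the following sketch (clampToImage is an illustrative helper, not part of the CoSLAM sources):

#include <algorithm>

// Clamp a sub-pixel feature coordinate to valid pixel indices of a W x H image.
static inline void clampToImage(double fx, double fy, int W, int H,
                                int& x, int& y) {
    x = std::max(0, std::min(static_cast<int>(fx), W - 1));
    y = std::max(0, std::min(static_cast<int>(fy), H - 1));
}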
Code Example #2
File: SL_SingleSLAM.cpp Project: danping/CoSLAM
void SingleSLAM::propagateFeatureStates() {
	for (int i = 0; i < m_tracker.m_nMaxCorners; i++) {
		Track2D& tk = m_tracker.m_tks[i];
		if (tk.empty())
			continue;
		Track2DNode* node = tk.tail;
		if (node->pre) {
			//propagate the type of feature points
			node->pt->type = node->pre->pt->type;
			if (node->pre->pt->mpt) {
				MapPoint* pMapPt = node->pre->pt->mpt;
				if (pMapPt->state != STATE_MAPPOINT_CURRENT)
					continue;
				//map correspondence propagation
				if (!pMapPt->isFalse()) {
					pMapPt->pFeatures[camId] = node->pt;
					node->pt->mpt = pMapPt;
					pMapPt->lastFrame = currentFrame();
					if (pMapPt->isCertainStatic())
						pMapPt->staticFrameNum++;
					node->pt->reprojErr = node->pre->pt->reprojErr;
				}
			}
		}
	}
}
Code Example #3
int NewMapPtsNCC::getSeedsBetween(MapPointList& mapPts, int iCam, int jCam,
		Mat_d& seed1, Mat_d& seed2) {
	MapPoint* pHead = mapPts.getHead();
	if (!pHead)
		repErr("SLAM failed - No map point can be found!\n");
	std::vector<FeaturePoint*> vecFeatPts1, vecFeatPts2;
	for (MapPoint* p = pHead; p; p = p->next) {
		//if ((p->iLocalType == TYPE_MAPPOINT_STATIC || p->iLocalType == TYPE_MAPPOINT_DYNAMIC) && p->numVisCam >= 2 && p->pFeatures[iCam] && p->pFeatures[iCam]->f == m_curFrame && p->pFeatures[jCam] && p->pFeatures[jCam]->f == m_curFrame) {
		if (!p->isFalse() && !p->isUncertain() && p->numVisCam >= 2
				&& p->pFeatures[iCam] && p->pFeatures[iCam]->f == m_curFrame
				&& p->pFeatures[jCam] && p->pFeatures[jCam]->f == m_curFrame) {
			vecFeatPts1.push_back(p->pFeatures[iCam]);
			vecFeatPts2.push_back(p->pFeatures[jCam]);
		}
	}
	if (vecFeatPts1.empty())
		return 0;

	seed1.resize(vecFeatPts1.size(), 2);
	seed2.resize(vecFeatPts2.size(), 2);

	for (size_t i = 0; i < vecFeatPts1.size(); i++) {
		seed1.data[2 * i] = vecFeatPts1[i]->x;
		seed1.data[2 * i + 1] = vecFeatPts1[i]->y;
		seed2.data[2 * i] = vecFeatPts2[i]->x;
		seed2.data[2 * i + 1] = vecFeatPts2[i]->y;
	}
	return seed1.rows;
}
Code Example #4
File: WrldCont.cpp Project: jleclanche/darkdust-ctp2
void World::FindContinentNeighbors()
{
    sint16 center_cont;
    MapPoint center;
    MapPoint test_cont;
    bool is_land = false;

    AllocateNeighborMem();

    for(center.x=0; center.x<m_size.x; center.x++) {
        for(center.y=0; center.y<m_size.y; center.y++) {

            GetContinent(center, center_cont, is_land);

            Assert(0 <= center_cont);
            if (center_cont < 0) continue;

            if(center.GetNeighborPosition(NORTHWEST, test_cont))
				FindAContinentNeighbor(center_cont, test_cont, is_land);

            if(center.GetNeighborPosition(WEST, test_cont))
				FindAContinentNeighbor(center_cont, test_cont, is_land);

            if(center.GetNeighborPosition(SOUTHWEST, test_cont))
				FindAContinentNeighbor(center_cont, test_cont, is_land);

            if(center.GetNeighborPosition(SOUTH, test_cont))
				FindAContinentNeighbor(center_cont, test_cont, is_land);
        }
    }
}
Code Example #5
int PointRegister::process(double pixelErrVar) {
	/*==============(critical section)=================*/
	enterBACriticalSection();
	bool empty = slam.actMapPts.empty();
	leaveBACriticalSection();
	if (empty)
		return 0;
	/*------------------------------------------------*/
	vector<MapPoint*> vecActMapPts;
	vecActMapPts.reserve(slam.actMapPts.size() * 2);

	/*==============(critical section)=================*/
	enterBACriticalSection();
	typedef std::list<MapPoint*>::iterator MapPointListIter;
	for (MapPointListIter iter = slam.actMapPts.begin();
			iter != slam.actMapPts.end(); iter++) {
		MapPoint* p = *iter;
		vecActMapPts.push_back(p);
	}
	slam.actMapPts.clear();
	slam.nActMapPts = (int) vecActMapPts.size();
	leaveBACriticalSection();
	/*------------------------------------------------*/

	/*get the mask and unmapped feature points for registration*/
	genMappedMask();
	getUnMappedFeatPoints();

	vector<bool> flagReged;
	flagReged.resize((size_t) slam.nActMapPts, false);
	/*==============(critical section)=================*/
	enterBACriticalSection();
	nRegTried = 0;
	nRegDeepTried = 0;
	for (int i = 0; i < slam.nActMapPts; i++) {
		MapPoint* p = vecActMapPts[i];
		if (!p->isFalse()) {
			flagReged[i] = regOnePoint(p, pixelErrVar);
		}
	}
	leaveBACriticalSection();
	/*------------------------------------------------*/

	/*==============(critical section)=================*/
	enterBACriticalSection();
	int nReg = 0;
	for (int i = 0; i < slam.nActMapPts; i++) {
		MapPoint* p = vecActMapPts[i];
		if (flagReged[i] == false) {
			slam.actMapPts.push_back(p);
		} else {
			p->lastFrame = slam.curFrame;
			p->state = STATE_MAPPOINT_CURRENT;
			slam.curMapPts.push_back(p);
			nReg++;
		}
	}
	leaveBACriticalSection();
	return nReg;
}
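The function above is built around a snapshot pattern: the shared active-point list is copied and cleared inside a short critical section, and the per-point work is done on the private copy. A minimal sketch of the same idea with standard C++ primitives (the mutex and containers below are placeholders for enterBACriticalSection/leaveBACriticalSection and slam.actMapPts, not the CoSLAM API):

#include <list>
#include <mutex>
#include <vector>

struct MapPoint;              // opaque payload type (placeholder)
std::mutex baMutex;           // stands in for enter/leaveBACriticalSection()
std::list<MapPoint*> actPts;  // shared active-point list (placeholder)

// Copy-and-clear the shared list under the lock; callers then process the
// returned batch without holding the mutex.
std::vector<MapPoint*> snapshotActivePoints() {
    std::lock_guard<std::mutex> lock(baMutex);
    std::vector<MapPoint*> batch(actPts.begin(), actPts.end());
    actPts.clear();
    return batch;
}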
Code Example #6
File: KeyFrame.cpp Project: egoist-sx/ORB_SLAM_iOS
 float KeyFrame::ComputeSceneMedianDepth(int q)
 {
     vector<MapPoint*> vpMapPoints;
     cv::Mat Tcw_;
     {
         boost::mutex::scoped_lock lock(mMutexFeatures);
         boost::mutex::scoped_lock lock2(mMutexPose);
         vpMapPoints = mvpMapPoints;
         Tcw_ = Tcw.clone();
     }
     
     vector<float> vDepths;
     vDepths.reserve(vpMapPoints.size());
     cv::Mat Rcw2 = Tcw_.row(2).colRange(0,3);
     Rcw2 = Rcw2.t();
     float zcw = Tcw_.at<float>(2,3);
     // iterate over the copy taken under the locks, not the live member
     for(size_t i=0; i<vpMapPoints.size(); i++)
     {
         if(vpMapPoints[i])
         {
             MapPoint* pMP = vpMapPoints[i];
             cv::Mat x3Dw = pMP->GetWorldPos();
             float z = Rcw2.dot(x3Dw)+zcw;
             vDepths.push_back(z);
         }
     }
     
     sort(vDepths.begin(),vDepths.end());
     
     return vDepths[(vDepths.size()-1)/q];
 }
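The final line picks element (n - 1) / q of the sorted depth array, i.e. the median for q = 2. Since only that single order statistic is needed, the full sort can be avoided; the following standalone sketch uses std::nth_element with plain arrays (names and signature are illustrative, not the ORB_SLAM API):

#include <algorithm>
#include <array>
#include <vector>

// Depth of the ((n-1)/q)-th point along the camera z axis, given the third
// row Rcw2 of the world-to-camera rotation and the z translation zcw.
float sceneDepthQuantile(const std::vector<std::array<float, 3> >& pts,
                         const float Rcw2[3], float zcw, int q) {
    std::vector<float> depths;
    depths.reserve(pts.size());
    for (size_t i = 0; i < pts.size(); i++)
        depths.push_back(Rcw2[0] * pts[i][0] + Rcw2[1] * pts[i][1]
                + Rcw2[2] * pts[i][2] + zcw);
    if (depths.empty())
        return 0.0f;                     // no valid points to measure
    size_t k = (depths.size() - 1) / q;  // median when q == 2
    std::nth_element(depths.begin(), depths.begin() + k, depths.end());
    return depths[k];
}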
Code Example #7
 void LocalMapping::KeyFrameCulling()
 {
     // Check redundant keyframes (only local keyframes)
     // A keyframe is considered redundant if 90% of the MapPoints it sees are
     // seen by at least 3 other keyframes (at the same or finer scale)
     vector<KeyFrame*> vpLocalKeyFrames = mpCurrentKeyFrame->GetVectorCovisibleKeyFrames();
     
     for(vector<KeyFrame*>::iterator vit=vpLocalKeyFrames.begin(), vend=vpLocalKeyFrames.end(); vit!=vend; vit++)
     {
         KeyFrame* pKF = *vit;
         if(pKF->mnId==0)
             continue;
         vector<MapPoint*> vpMapPoints = pKF->GetMapPointMatches();
         
         int nRedundantObservations=0;
         int nMPs=0;
         for(size_t i=0, iend=vpMapPoints.size(); i<iend; i++)
         {
             MapPoint* pMP = vpMapPoints[i];
             if(pMP)
             {
                 if(!pMP->isBad())
                 {
                     nMPs++;
                     if(pMP->Observations()>3)
                     {
                         int scaleLevel = pKF->GetKeyPointUn(i).octave;
                         map<KeyFrame*, size_t> observations = pMP->GetObservations();
                         int nObs=0;
                         for(map<KeyFrame*, size_t>::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
                         {
                             KeyFrame* pKFi = mit->first;
                             if(pKFi==pKF)
                                 continue;
                             int scaleLeveli = pKFi->GetKeyPointUn(mit->second).octave;
                             if(scaleLeveli<=scaleLevel+1)
                             {
                                 nObs++;
                                 if(nObs>=3)
                                     break;
                             }
                         }
                         if(nObs>=3)
                         {
                             nRedundantObservations++;
                         }
                     }
                 }
             }
         }
         
         if(nRedundantObservations>0.9*nMPs)
             pKF->SetBadFlag();
     }
 }
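The nested loops reduce to one question per map point: do at least three other keyframes observe it at the same or a finer scale (octave at most one above the reference)? That predicate in isolation (Obs is a stand-in for an entry of pMP->GetObservations(), not an ORB_SLAM type):

#include <vector>

struct Obs {
    bool sameKF;  // observation belongs to the keyframe under test
    int octave;   // pyramid level of the observing keypoint
};

// True if at least 3 *other* keyframes see the point at octave <= refOctave + 1.
bool redundantObservation(const std::vector<Obs>& observations, int refOctave) {
    int nObs = 0;
    for (size_t i = 0; i < observations.size(); i++) {
        if (observations[i].sameKF)
            continue;
        if (observations[i].octave <= refOctave + 1 && ++nObs >= 3)
            return true;
    }
    return false;
}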
Code Example #8
File: KeyFrame.cpp Project: egoist-sx/ORB_SLAM_iOS
 float KeyFrame::ComputeSceneMedianDepthWithCoord(int &x, int &y, int q) {
     
     //vector<MapPoint*> vpMapPoints;
     cv::Mat Tcw_;
     {
         boost::mutex::scoped_lock lock(mMutexFeatures);
         boost::mutex::scoped_lock lock2(mMutexPose);
         //vpMapPoints = mvpMapPoints;
         Tcw_ = Tcw.clone();
     }
     
     vector<float> vDepths;
     vDepths.reserve(mvpMapPoints.size());
     cv::Mat Rcw2 = Tcw_.row(2).colRange(0,3);
     Rcw2 = Rcw2.t();
     float zcw = Tcw_.at<float>(2,3);
     for(size_t i=0; i<mvpMapPoints.size(); i++)
     {
         if(mvpMapPoints[i])
         {
             MapPoint* pMP = mvpMapPoints[i];
             cv::Mat x3Dw = pMP->GetWorldPos();
             float z = Rcw2.dot(x3Dw)+zcw;
             vDepths.push_back(z);
         }
     }
     
     vector<size_t> depthIndex(vDepths.size());
     for (size_t i = 0; i != depthIndex.size(); ++i) depthIndex[i] = i;
     
     // sort the index array against the *unsorted* depths first, so that
     // depthIndex maps sorted rank -> original point index
     sort(depthIndex.begin(), depthIndex.end(),
          [&vDepths](size_t i1, size_t i2) {return vDepths[i1] < vDepths[i2];});
     sort(vDepths.begin(),vDepths.end());
     
     size_t medianIndex = depthIndex[(vDepths.size()-1)/q];
     size_t ptCounter = 0;
     MapPoint* pointWithMedianDepth = NULL;
     for (size_t i = 0; i<mvpMapPoints.size(); i++) {
         if (mvpMapPoints[i]) {
             if (ptCounter == medianIndex) {
                 pointWithMedianDepth = mvpMapPoints[i];
                 break;
             }
             ptCounter++;
         }
     }
     if (pointWithMedianDepth) {
         size_t keysUnIdx = pointWithMedianDepth->GetIndexInKeyFrame(this);
         x = mvKeysUn[keysUnIdx].pt.x;
         y = mvKeysUn[keysUnIdx].pt.y;
     }
     
     return vDepths[(vDepths.size()-1)/q];
 }
Code Example #9
 void LocalMapping::ProcessNewKeyFrame()
 {
     {
         boost::mutex::scoped_lock lock(mMutexNewKFs);
         mpCurrentKeyFrame = mlNewKeyFrames.front();
         mlNewKeyFrames.pop_front();
     }
     
     // Compute Bags of Words structures
     mpCurrentKeyFrame->ComputeBoW();
     
     if(mpCurrentKeyFrame->mnId==0)
         return;
     
     // Associate MapPoints to the new keyframe and update normal and descriptor
     vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
     if(mpCurrentKeyFrame->mnId>1) //These operations are already done in the tracking for the first two keyframes
     {
         for(size_t i=0; i<vpMapPointMatches.size(); i++)
         {
             MapPoint* pMP = vpMapPointMatches[i];
             if(pMP)
             {
                 if(!pMP->isBad())
                 {
                     pMP->AddObservation(mpCurrentKeyFrame, i);
                     pMP->UpdateNormalAndDepth();
                     pMP->ComputeDistinctiveDescriptors();
                 }
             }
         }
     }
     
     if(mpCurrentKeyFrame->mnId==1)
     {
         for(size_t i=0; i<vpMapPointMatches.size(); i++)
         {
             MapPoint* pMP = vpMapPointMatches[i];
             if(pMP)
             {
                 mlpRecentAddedMapPoints.push_back(pMP);
             }
         }
     }
     
     // Update links in the Covisibility Graph
     mpCurrentKeyFrame->UpdateConnections();
     
     // Insert Keyframe in Map
     mpMap->AddKeyFrame(mpCurrentKeyFrame);
 }
Code Example #10
File: KeyFrame.cpp Project: egoist-sx/ORB_SLAM_iOS
 set<MapPoint*> KeyFrame::GetMapPoints()
 {
     boost::mutex::scoped_lock lock(mMutexFeatures);
     set<MapPoint*> s;
     for(size_t i=0, iend=mvpMapPoints.size(); i<iend; i++)
     {
         if(!mvpMapPoints[i])
             continue;
         MapPoint* pMP = mvpMapPoints[i];
         if(!pMP->isBad())
             s.insert(pMP);
     }
     return s;
 }
Code Example #11
File: MapMakerServerBase.cpp Project: wavelab/mcptam
//  Adds map points at a given initial depth in a specified pyramid level
void MapMakerServerBase::AddInitDepthMapPoints(MultiKeyFrame& mkfSrc, int nLevel, int nLimit, double dInitDepth)
{
  for (KeyFramePtrMap::iterator it = mkfSrc.mmpKeyFrames.begin(); it != mkfSrc.mmpKeyFrames.end(); it++)
  {
    KeyFrame& kfSrc = *(it->second);
    TaylorCamera& cameraSrc = mmCameraModels[kfSrc.mCamName];
    Level& level = kfSrc.maLevels[nLevel];
    ThinCandidates(kfSrc, nLevel);

    int nLevelScale = LevelScale(nLevel);

    std::cout << "AddInitDepthMapPoints, processing " << level.vCandidates.size() << " candidates" << std::endl;

    for (unsigned int i = 0; i < level.vCandidates.size() && static_cast<int>(i) < nLimit; ++i)
    {
      TooN::Vector<2> v2RootPos = LevelZeroPos(level.vCandidates[i].irLevelPos, nLevel);
      TooN::Vector<3> v3UnProj = cameraSrc.UnProject(v2RootPos) *
                           dInitDepth;  // This unprojects the noisy image location to a depth of dInitDepth

      MapPoint* pPointNew = new MapPoint;
      pPointNew->mv3WorldPos = kfSrc.mse3CamFromWorld.inverse() * v3UnProj;
      pPointNew->mpPatchSourceKF = &kfSrc;
      pPointNew->mbFixed = false;
      pPointNew->mnSourceLevel = nLevel;
      pPointNew->mv3Normal_NC = TooN::makeVector(0, 0, -1);
      pPointNew->mirCenter = level.vCandidates[i].irLevelPos;
      pPointNew->mv3Center_NC = cameraSrc.UnProject(v2RootPos);
      pPointNew->mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
      pPointNew->mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

      normalize(pPointNew->mv3Center_NC);
      normalize(pPointNew->mv3OneDownFromCenter_NC);
      normalize(pPointNew->mv3OneRightFromCenter_NC);

      pPointNew->RefreshPixelVectors();

      mMap.mlpPoints.push_back(pPointNew);

      Measurement* pMeas = new Measurement;
      pMeas->v2RootPos = v2RootPos;
      pMeas->nLevel = nLevel;
      pMeas->eSource = Measurement::SRC_ROOT;

      kfSrc.AddMeasurement(pPointNew, pMeas);
      // kfSrc.mmpMeasurements[pPointNew] = pMeas;
      // pPointNew->mMMData.spMeasurementKFs.insert(&kfSrc);
    }
  }
}
Code Example #12
File: WrldCont.cpp Project: jleclanche/darkdust-ctp2
void World::GrowLand(MapPoint const & start)
{
    MapPointNode * search_list   = NULL;
    AddToLandSearch(search_list, start);

    MapPoint       test_pos;
    MapPoint       center;
    MapPointNode * finished_list = NULL;
#ifdef _DEBUG
    sint32         finite_loop   = 0;
#endif

    while (NextPoint(search_list, finished_list, center))
    {
		Assert(++finite_loop < 100000);

		if(center.GetNeighborPosition(NORTH, test_pos))
			AddToLandSearch(search_list, test_pos);
		if(center.GetNeighborPosition(NORTHEAST, test_pos))
			AddToLandSearch(search_list, test_pos);
		if(center.GetNeighborPosition(NORTHWEST, test_pos))
			AddToLandSearch(search_list, test_pos);

		if(center.GetNeighborPosition(EAST, test_pos))
			AddToLandSearch(search_list, test_pos);
		if(center.GetNeighborPosition(WEST, test_pos))
			AddToLandSearch(search_list, test_pos);

		if(center.GetNeighborPosition(SOUTH, test_pos))
			AddToLandSearch(search_list, test_pos);
		if(center.GetNeighborPosition(SOUTHEAST, test_pos))
			AddToLandSearch(search_list, test_pos);
		if(center.GetNeighborPosition(SOUTHWEST, test_pos))
			AddToLandSearch(search_list, test_pos);
    }

#ifdef _DEBUG
    finite_loop=0;
#endif

    while (finished_list)
    {
        Assert(++finite_loop < 100000);

        MapPointNode * ptr = finished_list;
        finished_list = finished_list->next;

        GetCell(ptr->pos)->SetContinent(m_land_continent_max);
        delete ptr;
    }

    m_land_continent_max++;
    Assert(LAND_CONTINENT_START < m_land_continent_max);
}
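GrowLand is a flood fill: seed one tile, repeatedly expand across the eight neighbor directions, then stamp the collected component with a new continent id. The same control flow with standard containers (a sketch; the neighbours callback stands in for the GetNeighborPosition/AddToLandSearch machinery):

#include <queue>
#include <set>
#include <utility>
#include <vector>

typedef std::pair<int, int> Tile;

// Collect the connected component containing `start` by breadth-first
// flood fill. `neighbours` enumerates the adjacent land tiles.
std::vector<Tile> floodFill(const Tile& start,
                            std::vector<Tile> (*neighbours)(const Tile&)) {
    std::vector<Tile> component;
    std::set<Tile> visited;
    std::queue<Tile> frontier;
    frontier.push(start);
    visited.insert(start);
    while (!frontier.empty()) {
        Tile t = frontier.front();
        frontier.pop();
        component.push_back(t);
        std::vector<Tile> nbrs = neighbours(t);
        for (size_t i = 0; i < nbrs.size(); i++)
            if (visited.insert(nbrs[i]).second)  // true only when newly seen
                frontier.push(nbrs[i]);
    }
    return component;
}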
Code Example #13
File: GameWorldView.cpp Project: vader1986/s25client
void GameWorldView::MoveToMapPt(const MapPoint pt)
{
    if(!pt.isValid())
        return;

    lastOffset = offset;
    Point<int> nodePos = GetWorld().GetNodePos(pt);

    MoveTo(nodePos - DrawPoint(GetSize() / 2), true);
}
Code Example #14
BOOST_FIXTURE_TEST_CASE(HQPlacement, WorldFixtureEmpty1P)
{
    GamePlayer& player = world.GetPlayer(0);
    BOOST_REQUIRE(player.isUsed());
    const MapPoint hqPos = player.GetHQPos();
    BOOST_REQUIRE(hqPos.isValid());
    BOOST_REQUIRE_EQUAL(world.GetNO(player.GetHQPos())->GetGOT(), GOT_NOB_HQ);
    // Check ownership of points
    std::vector<MapPoint> ownerPts = world.GetPointsInRadius(hqPos, HQ_RADIUS);
    BOOST_FOREACH(MapPoint pt, ownerPts)
    {
        // This should be ensured by `GetPointsInRadius`
        BOOST_REQUIRE_LE(world.CalcDistance(pt, hqPos), HQ_RADIUS);
        // We must own this point
        BOOST_REQUIRE_EQUAL(world.GetNode(pt).owner, 1);
        // Points at radius are border nodes, others player territory
        if(world.CalcDistance(pt, hqPos) == HQ_RADIUS)
            BOOST_REQUIRE(world.IsBorderNode(pt, 1));
        else
            BOOST_REQUIRE(world.IsPlayerTerritory(pt));
    }
}
Code Example #15
 void LocalMapping::MapPointCulling()
 {
     // Check Recent Added MapPoints
     list<MapPoint*>::iterator lit = mlpRecentAddedMapPoints.begin();
     const unsigned long int nCurrentKFid = mpCurrentKeyFrame->mnId;
     while(lit!=mlpRecentAddedMapPoints.end())
     {
         MapPoint* pMP = *lit;
         if(pMP->isBad())
         {
             lit = mlpRecentAddedMapPoints.erase(lit);
         }
         else if(pMP->GetFoundRatio()<0.25f )
         {
             pMP->SetBadFlag();
             lit = mlpRecentAddedMapPoints.erase(lit);
         }
         else if((nCurrentKFid-pMP->mnFirstKFid)>=2 && pMP->Observations()<=2)
         {
             
             pMP->SetBadFlag();
             lit = mlpRecentAddedMapPoints.erase(lit);
         }
         else if((nCurrentKFid-pMP->mnFirstKFid)>=3)
             lit = mlpRecentAddedMapPoints.erase(lit);
         else
             lit++;
     }
 }
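The branch chain is a lifetime policy for freshly triangulated points: bad points leave the probation list immediately, rarely-found or weakly-observed points are flagged bad, and points that survive three keyframes graduate out of the list. The same policy condensed into one decision function (a sketch; the enum and parameter names are illustrative):

enum CullAction { CULL_ERASE, CULL_SET_BAD, CULL_KEEP, CULL_GRADUATE };

// Decision table distilled from MapPointCulling above.
CullAction cullDecision(bool isBad, float foundRatio,
                        unsigned long ageInKFs, int nObservations) {
    if (isBad)
        return CULL_ERASE;                      // already marked bad elsewhere
    if (foundRatio < 0.25f)
        return CULL_SET_BAD;                    // found in <25% of predicted frames
    if (ageInKFs >= 2 && nObservations <= 2)
        return CULL_SET_BAD;                    // old enough but barely observed
    if (ageInKFs >= 3)
        return CULL_GRADUATE;                   // passed probation, leave the list
    return CULL_KEEP;                           // keep watching
}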
Code Example #16
void BundleRTS::updateNewPosesPoints() {
    //update the points
    for (MapPoint* mpt = pCoSLAM->curMapPts.getHead(); mpt; mpt = mpt->next) {
        if (mpt->lastFrame <= firstKeyFrame->f)
            continue;
//		if (mpt->bLocalDyn == TYPE_MAPPOINT_STATIC || mpt->bLocalDyn == TYPE_MAPPOINT_UNCERTAIN
//		)
        if (mpt->isLocalStatic())
            updateStaticPointPosition(pCoSLAM->numCams, mpt,
                                      Const::PIXEL_ERR_VAR, false);
        else if (mpt->isLocalDynamic())
            updateDynamicPointPosition(pCoSLAM->numCams, mpt,
                                       Const::PIXEL_ERR_VAR, false);
    }
    for (MapPoint* mpt = pCoSLAM->actMapPts.getHead(); mpt; mpt = mpt->next) {
        if (mpt->lastFrame <= firstKeyFrame->f)
            continue;
//		if (mpt->bLocalDyn == TYPE_MAPPOINT_STATIC || mpt->bLocalDyn == TYPE_MAPPOINT_UNCERTAIN
//		)
        if (mpt->isLocalStatic())
            updateStaticPointPosition(pCoSLAM->numCams, mpt,
                                      Const::PIXEL_ERR_VAR, false);
        else if (mpt->isLocalDynamic())
            updateDynamicPointPosition(pCoSLAM->numCams, mpt,
                                       Const::PIXEL_ERR_VAR, false);
    }
}
Code Example #17
File: StereoFrame.cpp Project: snooble/sptam
bool StereoFrame::canView(const MapPoint& mapPoint) const
{
  boost::shared_lock<boost::shared_mutex> lock(stereo_frames_mutex_);

  const Eigen::Vector3d& point = mapPoint.GetPosition();

  // compute only once, since both cameras have the same orientation.
  bool similar_angle = false;
  {
    Eigen::Vector3d currentNormal = point - frameLeft_.GetPosition();
    currentNormal.normalize();

    // angle is in radians
    double angle = std::acos( ( mapPoint.GetNormal() ).dot( currentNormal ) );

    // Discard points which were created from a viewing angle greater than 45 degrees.
    // TODO: pass this threshold as a parameter.
    similar_angle = angle < (M_PI / 4.0);
  }

  return similar_angle and ( frameLeft_.GetCamera().CanView( point ) or frameRight_.GetCamera().CanView( point ) );
}
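The angle test compares the point's stored normal against the ray from the camera to the point. Written out with plain 3-vectors (a sketch; it assumes, as the snippet does for GetNormal(), that the normal is unit length):

#include <algorithm>
#include <cmath>

// True if the angle between the point's (unit) normal and the camera-to-point
// ray is below maxAngleRad, e.g. M_PI / 4 for the 45 degree rule above.
bool similarViewingAngle(const double normal[3], const double camPos[3],
                         const double point[3], double maxAngleRad) {
    double ray[3] = { point[0] - camPos[0], point[1] - camPos[1],
                      point[2] - camPos[2] };
    double len = std::sqrt(ray[0] * ray[0] + ray[1] * ray[1] + ray[2] * ray[2]);
    if (len == 0.0)
        return false;                         // camera sits exactly on the point
    double c = (normal[0] * ray[0] + normal[1] * ray[1] + normal[2] * ray[2]) / len;
    c = std::max(-1.0, std::min(1.0, c));     // guard acos against round-off
    return std::acos(c) < maxAngleRad;
}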
Code Example #18
File: SL_SingleSLAM.cpp Project: danping/CoSLAM
void SingleSLAM::saveFeatureTracks(const char* filePath, int minLen,
		int startFrame) {
	FILE* fp = fopen(filePath, "w");
	if (!fp)
		repErr("SingleSLAM::saveCamPoses -- cannot open '%s'!", filePath);

	for (int i = 0; i < m_tracker.m_nMaxCorners; i++) {
		Track2D& tk = m_tracker.m_tks[i];
		if (tk.empty() || tk.length() < minLen)
			continue;

		if (tk.tail->pt->type == TYPE_FEATPOINT_DYNAMIC)
			continue;

		Track2DNode* node = tk.tail;
		MapPoint* mpt = node->pt->mpt;
		if (mpt && mpt->isCertainStatic())
			fprintf(fp, "1 %g %g %g\n", mpt->M[0], mpt->M[1], mpt->M[2]);
		else
			fprintf(fp, "0 0 0 0\n");
		//output the feature points

		std::vector<FeaturePoint*> tmpFeatPts;
		for (FeaturePoint* featPt = node->pt; featPt && featPt->f >= startFrame;
				featPt = featPt->preFrame) {
			tmpFeatPts.push_back(featPt);
		}

		for (std::vector<FeaturePoint*>::reverse_iterator iter =
				tmpFeatPts.rbegin(); iter != tmpFeatPts.rend(); iter++) {
			FeaturePoint* featPt = *iter;
			fprintf(fp, "%d %g %g ", featPt->f, featPt->x, featPt->y);
		}
		fprintf(fp, "\n");
	}
	fclose(fp);
}
Code Example #19
File: SL_CoSLAMRobustBA.cpp Project: Kyate/CoSLAM
void RobustBundleRTS::updateNewPosesPoints() {
	//update the points
	for (MapPoint* mpt = pCoSLAM->curMapPts.getHead(); mpt; mpt = mpt->next) {
		if (mpt->lastFrame <= firstKeyFrame->f)
			continue;

		if (mpt->isLocalStatic())
			updateStaticPointPosition(pCoSLAM->numCams, mpt,
					Const::PIXEL_ERR_VAR, true);
		else if (mpt->isLocalDynamic())
			updateDynamicPointPosition(pCoSLAM->numCams, mpt,
					Const::PIXEL_ERR_VAR, true);
	}
	for (MapPoint* mpt = pCoSLAM->actMapPts.getHead(); mpt; mpt = mpt->next) {
		if (mpt->lastFrame <= firstKeyFrame->f)
			continue;
		if (mpt->isLocalStatic())
			updateStaticPointPosition(pCoSLAM->numCams, mpt,
					Const::PIXEL_ERR_VAR, true);
		else if (mpt->isLocalDynamic())
			updateDynamicPointPosition(pCoSLAM->numCams, mpt,
					Const::PIXEL_ERR_VAR, true);
	}
}
Code Example #20
 void LocalMapping::CreateNewMapPoints()
 {
     // Take neighbor keyframes in covisibility graph
     vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(20);
     
     ORBmatcher matcher(0.6,false);
     
     cv::Mat Rcw1 = mpCurrentKeyFrame->GetRotation();
     cv::Mat Rwc1 = Rcw1.t();
     cv::Mat tcw1 = mpCurrentKeyFrame->GetTranslation();
     cv::Mat Tcw1(3,4,CV_32F);
     Rcw1.copyTo(Tcw1.colRange(0,3));
     tcw1.copyTo(Tcw1.col(3));
     cv::Mat Ow1 = mpCurrentKeyFrame->GetCameraCenter();
     
     const float fx1 = mpCurrentKeyFrame->fx;
     const float fy1 = mpCurrentKeyFrame->fy;
     const float cx1 = mpCurrentKeyFrame->cx;
     const float cy1 = mpCurrentKeyFrame->cy;
     const float invfx1 = 1.0f/fx1;
     const float invfy1 = 1.0f/fy1;
     
     const float ratioFactor = 1.5f*mpCurrentKeyFrame->GetScaleFactor();
     
     // Search matches with epipolar restriction and triangulate
     for(size_t i=0; i<vpNeighKFs.size(); i++)
     {
         KeyFrame* pKF2 = vpNeighKFs[i];
         
         // Check first that baseline is not too short
         // Small translation errors for short baseline keyframes make scale to diverge
         cv::Mat Ow2 = pKF2->GetCameraCenter();
         cv::Mat vBaseline = Ow2-Ow1;
         const float baseline = cv::norm(vBaseline);
         const float medianDepthKF2 = pKF2->ComputeSceneMedianDepth(2);
         const float ratioBaselineDepth = baseline/medianDepthKF2;
         
         if(ratioBaselineDepth<0.01)
             continue;
         
         // Compute Fundamental Matrix
         cv::Mat F12 = ComputeF12(mpCurrentKeyFrame,pKF2);
         
         // Search matches that fulfil epipolar constraint
         vector<cv::KeyPoint> vMatchedKeysUn1;
         vector<cv::KeyPoint> vMatchedKeysUn2;
         vector<pair<size_t,size_t> > vMatchedIndices;
         matcher.SearchForTriangulation(mpCurrentKeyFrame,pKF2,F12,vMatchedKeysUn1,vMatchedKeysUn2,vMatchedIndices);
         
         cv::Mat Rcw2 = pKF2->GetRotation();
         cv::Mat Rwc2 = Rcw2.t();
         cv::Mat tcw2 = pKF2->GetTranslation();
         cv::Mat Tcw2(3,4,CV_32F);
         Rcw2.copyTo(Tcw2.colRange(0,3));
         tcw2.copyTo(Tcw2.col(3));
         
         const float fx2 = pKF2->fx;
         const float fy2 = pKF2->fy;
         const float cx2 = pKF2->cx;
         const float cy2 = pKF2->cy;
         const float invfx2 = 1.0f/fx2;
         const float invfy2 = 1.0f/fy2;
         
         // Triangulate each match
         for(size_t ikp=0, iendkp=vMatchedKeysUn1.size(); ikp<iendkp; ikp++)
         {
             const int idx1 = vMatchedIndices[ikp].first;
             const int idx2 = vMatchedIndices[ikp].second;
             
             const cv::KeyPoint &kp1 = vMatchedKeysUn1[ikp];
             const cv::KeyPoint &kp2 = vMatchedKeysUn2[ikp];
             
             // Check parallax between rays
             cv::Mat xn1 = (cv::Mat_<float>(3,1) << (kp1.pt.x-cx1)*invfx1, (kp1.pt.y-cy1)*invfy1, 1.0 );
             cv::Mat ray1 = Rwc1*xn1;
             cv::Mat xn2 = (cv::Mat_<float>(3,1) << (kp2.pt.x-cx2)*invfx2, (kp2.pt.y-cy2)*invfy2, 1.0 );
             cv::Mat ray2 = Rwc2*xn2;
             const float cosParallaxRays = ray1.dot(ray2)/(cv::norm(ray1)*cv::norm(ray2));
             
             if(cosParallaxRays<0 || cosParallaxRays>0.9998)
                 continue;
             
             // Linear Triangulation Method
             cv::Mat A(4,4,CV_32F);
             A.row(0) = xn1.at<float>(0)*Tcw1.row(2)-Tcw1.row(0);
             A.row(1) = xn1.at<float>(1)*Tcw1.row(2)-Tcw1.row(1);
             A.row(2) = xn2.at<float>(0)*Tcw2.row(2)-Tcw2.row(0);
             A.row(3) = xn2.at<float>(1)*Tcw2.row(2)-Tcw2.row(1);
             
             cv::Mat w,u,vt;
             cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV);
             
             cv::Mat x3D = vt.row(3).t();
             
             if(x3D.at<float>(3)==0)
                 continue;
             
             // Euclidean coordinates
             x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
             cv::Mat x3Dt = x3D.t();
             
             //Check triangulation in front of cameras
             float z1 = Rcw1.row(2).dot(x3Dt)+tcw1.at<float>(2);
             if(z1<=0)
                 continue;
             
             float z2 = Rcw2.row(2).dot(x3Dt)+tcw2.at<float>(2);
             if(z2<=0)
                 continue;
             
             //Check reprojection error in first keyframe
             float sigmaSquare1 = mpCurrentKeyFrame->GetSigma2(kp1.octave);
             float x1 = Rcw1.row(0).dot(x3Dt)+tcw1.at<float>(0);
             float y1 = Rcw1.row(1).dot(x3Dt)+tcw1.at<float>(1);
             float invz1 = 1.0/z1;
             float u1 = fx1*x1*invz1+cx1;
             float v1 = fy1*y1*invz1+cy1;
             float errX1 = u1 - kp1.pt.x;
             float errY1 = v1 - kp1.pt.y;
             if((errX1*errX1+errY1*errY1)>5.991*sigmaSquare1)
                 continue;
             
             //Check reprojection error in second keyframe
             float sigmaSquare2 = pKF2->GetSigma2(kp2.octave);
             float x2 = Rcw2.row(0).dot(x3Dt)+tcw2.at<float>(0);
             float y2 = Rcw2.row(1).dot(x3Dt)+tcw2.at<float>(1);
             float invz2 = 1.0/z2;
             float u2 = fx2*x2*invz2+cx2;
             float v2 = fy2*y2*invz2+cy2;
             float errX2 = u2 - kp2.pt.x;
             float errY2 = v2 - kp2.pt.y;
             if((errX2*errX2+errY2*errY2)>5.991*sigmaSquare2)
                 continue;
             
             //Check scale consistency
             cv::Mat normal1 = x3D-Ow1;
             float dist1 = cv::norm(normal1);
             
             cv::Mat normal2 = x3D-Ow2;
             float dist2 = cv::norm(normal2);
             
             if(dist1==0 || dist2==0)
                 continue;
             
             float ratioDist = dist1/dist2;
             float ratioOctave = mpCurrentKeyFrame->GetScaleFactor(kp1.octave)/pKF2->GetScaleFactor(kp2.octave);
             if(ratioDist*ratioFactor<ratioOctave || ratioDist>ratioOctave*ratioFactor)
                 continue;
             
             // Triangulation is successful
             MapPoint* pMP = new MapPoint(x3D,mpCurrentKeyFrame,mpMap);
             
             pMP->AddObservation(pKF2,idx2);
             pMP->AddObservation(mpCurrentKeyFrame,idx1);
             
             mpCurrentKeyFrame->AddMapPoint(pMP,idx1);
             pKF2->AddMapPoint(pMP,idx2);
             
             pMP->ComputeDistinctiveDescriptors();
             
             pMP->UpdateNormalAndDepth();
             
             mpMap->AddMapPoint(pMP);
             mlpRecentAddedMapPoints.push_back(pMP);
         }
     }
 }
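The "Linear Triangulation Method" block is the standard DLT: stack four linear constraints from the two normalized rays and the two 3x4 poses, take the SVD, and read the homogeneous point off the last row of V^T. Factored into a helper with the same OpenCV types (a sketch, not an ORB_SLAM function):

#include <opencv2/core/core.hpp>

// DLT triangulation of one match. xn1/xn2 are 3x1 normalized camera
// coordinates (z = 1), Tcw1/Tcw2 are 3x4 [R|t]. Writes a Euclidean 3x1 point;
// returns false for points at infinity.
bool triangulateDLT(const cv::Mat& xn1, const cv::Mat& xn2,
                    const cv::Mat& Tcw1, const cv::Mat& Tcw2, cv::Mat& x3D) {
    cv::Mat A(4, 4, CV_32F);
    A.row(0) = xn1.at<float>(0) * Tcw1.row(2) - Tcw1.row(0);
    A.row(1) = xn1.at<float>(1) * Tcw1.row(2) - Tcw1.row(1);
    A.row(2) = xn2.at<float>(0) * Tcw2.row(2) - Tcw2.row(0);
    A.row(3) = xn2.at<float>(1) * Tcw2.row(2) - Tcw2.row(1);

    cv::Mat w, u, vt;
    cv::SVD::compute(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);

    cv::Mat x = vt.row(3).t();                // null vector of A (homogeneous)
    if (x.at<float>(3) == 0)
        return false;
    x3D = x.rowRange(0, 3) / x.at<float>(3);  // dehomogenize
    return true;
}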
Code Example #21
	void SkinExportSerializer::fillStateData(DataPtr _data, pugi::xml_node _node)
	{
		typedef std::map<std::string, MyGUI::IntPoint> MapPoint;
		MapPoint values;

		pugi::xpath_node_set states = _node.select_nodes("BasisSkin/State");
		for (pugi::xpath_node_set::const_iterator state = states.begin(); state != states.end(); state ++)
		{
			MyGUI::IntCoord coord((std::numeric_limits<int>::max)(), (std::numeric_limits<int>::max)(), 0, 0);

			pugi::xml_attribute attribute = (*state).node().attribute("offset");
			if (!attribute.empty())
				coord = MyGUI::IntCoord::parse(attribute.value());

			std::string name = (*state).node().attribute("name").value();
			MapPoint::iterator valuesIterator = values.find(name);
			if (valuesIterator != values.end())
			{
				(*valuesIterator).second = MyGUI::IntPoint(
					(std::min)((*valuesIterator).second.left, coord.left),
					(std::min)((*valuesIterator).second.top, coord.top));
			}
			else
			{
				values[name] = coord.point();
			}

			// create, if there is no data
			name = convertExportToEditorStateName(name);
			DataPtr childData = getChildData(_data, "State", name);
			if (childData == nullptr)
			{
				childData = Data::CreateInstance();
				childData->setType(DataTypeManager::getInstance().getType("State"));
				childData->setPropertyValue("Name", name);
				_data->addChild(childData);
			}
		}

		for (Data::VectorData::const_iterator child = _data->getChilds().begin(); child != _data->getChilds().end(); child ++)
		{
			if ((*child)->getType()->getName() != "State")
				continue;

			DataPtr childData = (*child);
			MapPoint::iterator result = values.find(convertEditorToExportStateName(childData->getPropertyValue("Name")));
			if (result != values.end())
			{
				childData->setPropertyValue("Visible", "True");
				if ((*result).second.left != (std::numeric_limits<int>::max)() &&
					(*result).second.top != (std::numeric_limits<int>::max)())
					childData->setPropertyValue("Point", (*result).second);
			}
		}

		states = _node.select_nodes("BasisSkin/State[@colour]");
		for (pugi::xpath_node_set::const_iterator state = states.begin(); state != states.end(); state ++)
		{
			std::string name = (*state).node().attribute("name").value();
			int textShift = MyGUI::utility::parseValue<int>((*state).node().attribute("shift").value());
			MyGUI::Colour textColour = MyGUI::utility::parseValue<MyGUI::Colour>((*state).node().attribute("colour").value());

			for (Data::VectorData::const_iterator child = _data->getChilds().begin(); child != _data->getChilds().end(); child ++)
			{
				if ((*child)->getType()->getName() != "State")
					continue;

				DataPtr childData = (*child);
				if (convertEditorToExportStateName(childData->getPropertyValue("Name")) == name)
				{
					childData->setPropertyValue("TextShift", textShift);
					childData->setPropertyValue("TextColour", MyGUI::utility::toString(textColour.red, " ", textColour.green, " ", textColour.blue));
				}
			}
		}
	}
Code Example #22
int NewMapPtsSURF::reconstructTracks(Track2D tracks[], int numTracks, int curFrame, std::vector<MapPoint*>& mapPts, //new map points that are generated
		int minLen,
		double maxRpErr) {

	std::vector<FeaturePoint*> reconFeatPts[SLAM_MAX_NUM];
	int num = 0;
	for (int k = 0; k < numTracks; k++) {
		if (tracks[k].length() >= minLen) {
			Mat_d ms(numCams, 2);
			Mat_d nms(numCams, 2);
			Mat_d Ks(numCams, 9);
			Mat_d Rs(numCams, 9);
			Mat_d Ts(numCams, 3);

			int npts = 0;
			for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode; pTkNode = pTkNode->next) {
				int camId = m_camGroup.camIds[pTkNode->f];
				ms.data[2 * npts] = pTkNode->x;
				ms.data[2 * npts + 1] = pTkNode->y;

				//normalize the image coordinates of the feature points
				normPoint(m_invK[camId], ms.data + 2 * npts, nms.data + 2 * npts);

				memcpy(Ks.data + 9 * npts, m_K[camId], sizeof(double) * 9);
				memcpy(Rs.data + 9 * npts, m_R[camId], sizeof(double) * 9);
				memcpy(Ts.data + 3 * npts, m_t[camId], sizeof(double) * 3);
				npts++;
			}

			double M[4];
			triangulateMultiView(npts, Rs.data, Ts.data, nms.data, M);
			bool outlier = false;
			//check re-projection error
			for (int i = 0; i < npts; i++) {
				double rm[2];
				project(Ks.data + 9 * i, Rs.data + 9 * i, Ts.data + 3 * i, M, rm);
				double err = dist2(ms.data + 2 * i, rm);
				if (err > maxRpErr || isAtCameraBack(Rs.data + 9 * i, Ts.data + 3 * i, M)) {
					outlier = true;
					break;
				}
			}
			//if it is an inlier, a new map point is generated
			if (!outlier) {
				MapPoint* pM = new MapPoint(M[0], M[1], M[2], curFrame);
				mapPts.push_back(pM);
				//get the triangulation covariance
				getTriangulateCovMat(npts, Ks.data, Rs.data, Ts.data, M, pM->cov, Const::PIXEL_ERR_VAR);
				for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode; pTkNode = pTkNode->next) {
					int camId = m_camGroup.camIds[pTkNode->f];
					FeaturePoint* fp = pTkNode->pt;
					reconFeatPts[pTkNode->f].push_back(fp);
					fp->reprojErr = reprojErrorSingle(fp->K, fp->cam->R, fp->cam->t, M, fp->m);
					pM->addFeature(camId, fp);
				}
				pM->setUncertain();
				num++;
			}
		}
	}

	//test
	logInfo("%d new map points are generated!\n", num);

	for (int c = 0; c < numCams; c++) {
		int camId = m_camGroup.camIds[c];
		pCoSLAM->slam[camId].feedExtraFeatPtsToTracker(reconFeatPts[camId]);
	}
	return num;

}
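The inlier test projects the triangulated point back into every view and thresholds the pixel error (dist2 above). A self-contained version of that projection and error (row-major arrays matching the Ks/Rs/Ts layout; whether the repo's dist2 returns the squared or plain distance is not visible here, so this sketch returns the squared error explicitly):

// Reproject world point M through K[R|t] and return the squared pixel error
// against measurement m. K and R are row-major 3x3, t is 3x1. Sketch only.
double reprojErr2(const double K[9], const double R[9], const double t[3],
                  const double M[3], const double m[2]) {
    double Xc[3];
    for (int i = 0; i < 3; i++)
        Xc[i] = R[3 * i] * M[0] + R[3 * i + 1] * M[1] + R[3 * i + 2] * M[2] + t[i];
    double u = K[0] * Xc[0] + K[1] * Xc[1] + K[2] * Xc[2];
    double v = K[3] * Xc[0] + K[4] * Xc[1] + K[5] * Xc[2];
    double w = K[6] * Xc[0] + K[7] * Xc[1] + K[8] * Xc[2];
    double dx = u / w - m[0];
    double dy = v / w - m[1];
    return dx * dx + dy * dy;
}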
Code Example #23
void NewMapPts::decidePointType() {
	using namespace std;
	assert(pCoSLAM);

	const int hw = 20;

	//generate dynamic masks
	int numCams = pCoSLAM->numCams;
	Mat_uc mask[SLAM_MAX_NUM];
	for (int c = 0; c < numCams; c++) {
		int camId = m_camGroup.camIds[c];
		mask[camId].resize(pCoSLAM->slam[camId].H, pCoSLAM->slam[camId].W);
		mask[camId].fill(0);
	}

	for (int c = 0; c < numCams; c++) {
		int camId = m_camGroup.camIds[c];
		vector<FeaturePoint*> vecDynMapPts;
		pCoSLAM->slam[camId].getMappedDynPts(vecDynMapPts);

		int W = mask[camId].cols;
		int H = mask[camId].rows;
		for (size_t i = 0; i < vecDynMapPts.size(); i++) {
			FeaturePoint* fp = vecDynMapPts[i];
			int x = static_cast<int>(fp->x + 0.5);
			int y = static_cast<int>(fp->y + 0.5);

			for (int s = -hw; s <= hw; s++) {
				for (int t = -hw; t <= hw; t++) {
					int x1 = x + t;
					int y1 = y + s;
					if (x1 < 0 || x1 >= W || y1 < 0 || y1 >= H)
						continue;
					mask[camId].data[y1 * W + x1] = 1;
				}
			}
		}
	}

	for (size_t i = 0; i < newMapPts.size(); i++) {
		MapPoint* mpt = newMapPts[i];
		if (mpt->isUncertain()) {
			bool isStatic = true;
			for (int c = 0; c < numCams; c++) {
				int camId = m_camGroup.camIds[c];
				int W = mask[camId].cols;
				int H = mask[camId].rows;
				FeaturePoint* fp = mpt->pFeatures[camId];
				if (fp) {
					int x = static_cast<int>(fp->x + 0.5);
					int y = static_cast<int>(fp->y + 0.5);

					if (x >= 0 && x < W && y >= 0 && y < H) {
						if (mask[camId][y * W + x] > 0) {
							isStatic = false;
							break;
						}
					}
				}
			}
			if (isStatic) {
				mpt->setLocalStatic();
			}
		}
	}

}
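The inner s/t loops stamp a (2*hw+1) x (2*hw+1) square around each dynamic feature, in effect a box dilation of the dynamic mask. As a standalone helper (illustrative only):

// Mark a (2*hw+1) x (2*hw+1) square centered at (x, y) in a row-major
// W x H byte mask, clipping at the image borders.
void stampSquare(unsigned char* mask, int W, int H, int x, int y, int hw) {
    for (int s = -hw; s <= hw; s++) {
        int y1 = y + s;
        if (y1 < 0 || y1 >= H)
            continue;
        for (int t = -hw; t <= hw; t++) {
            int x1 = x + t;
            if (x1 < 0 || x1 >= W)
                continue;
            mask[y1 * W + x1] = 1;
        }
    }
}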
Code Example #24
int NewMapPtsNCC::reconstructTracks(Track2D tracks[], int numTracks,
		int curFrame, std::vector<MapPoint*>& mapPts, int minLen,
		double maxRpErr) {
	int num = 0;
	for (int k = 0; k < numTracks; k++) {
		if (tracks[k].length() >= minLen) {
			Mat_d ms(numCams, 2);
			Mat_d nms(numCams, 2);
			Mat_d Ks(numCams, 9);
			Mat_d Rs(numCams, 9);
			Mat_d Ts(numCams, 3);

			int npts = 0;
			for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode; pTkNode =
					pTkNode->next) {
				int camId = pTkNode->f;
				ms.data[2 * npts] = pTkNode->x;
				ms.data[2 * npts + 1] = pTkNode->y;

				//normalize the image coordinates of the feature points
				normPoint(m_invK[camId], ms.data + 2 * npts,
						nms.data + 2 * npts);

				memcpy(Ks.data + 9 * npts, m_K[camId], sizeof(double) * 9);
				memcpy(Rs.data + 9 * npts, m_R[camId], sizeof(double) * 9);
				memcpy(Ts.data + 3 * npts, m_t[camId], sizeof(double) * 3);
				npts++;
			}

			double M[4];
			triangulateMultiView(npts, Rs.data, Ts.data, nms.data, M);
			bool outlier = false;
			//check re-projection error
			for (int i = 0; i < npts; i++) {
				double rm[2];
				project(Ks.data + 9 * i, Rs.data + 9 * i, Ts.data + 3 * i, M,
						rm);
				double err = dist2(ms.data + 2 * i, rm);
				if (err > maxRpErr
						|| isAtCameraBack(Rs.data + 9 * i, Ts.data + 3 * i, M)) {
					outlier = true;
					break;
				}
			}
			//if it is an inlier, a new map point is generated
			if (!outlier) {
				MapPoint* pM = new MapPoint(M[0], M[1], M[2], curFrame);
				mapPts.push_back(pM);
				//get the triangulation covariance
				getTriangulateCovMat(npts, Ks.data, Rs.data, Ts.data, M,
						pM->cov, Const::PIXEL_ERR_VAR);
				int nDyn = 0;
				for (Track2DNode* pTkNode = tracks[k].head.next; pTkNode;
						pTkNode = pTkNode->next) {
					int iCam = m_camGroup.camIds[pTkNode->f];
					FeaturePoint* fp = pTkNode->pt;
					fp->reprojErr = reprojErrorSingle(fp->K, fp->cam->R,
							fp->cam->t, M, fp->m);
					pM->addFeature(iCam, fp);
					if (fp->type == TYPE_FEATPOINT_DYNAMIC)
						nDyn++;
				}
				if (nDyn > 1) {
					pM->setLocalDynamic();
				}
//				else if (nDyn == 1)
//					pM->type = TYPE_MAPPOINT_UNCERTAIN;
//				else{
//					pM->type = TYPE_MAPPOINT_UNCERTAIN;
//					pM->newPt = true;
//				}
				else
					pM->setUncertain();
				num++;
			}
		}
	}
	return num;
}
Code Example #25
File: MapMakerServerBase.cpp Project: wavelab/mcptam
// Tries to make a new map point out of a single candidate point
// by searching for that point in another keyframe, and triangulating
// if a match is found.
bool MapMakerServerBase::AddPointEpipolar(KeyFrame& kfSrc, KeyFrame& kfTarget, int nLevel, int nCandidate)
{
  // debug
  static GVars3::gvar3<int> gvnCrossCamera("CrossCamera", 1, GVars3::HIDDEN | GVars3::SILENT);
  if (!*gvnCrossCamera && kfSrc.mCamName != kfTarget.mCamName)
    return false;

  TaylorCamera& cameraSrc = mmCameraModels[kfSrc.mCamName];
  TaylorCamera& cameraTarget = mmCameraModels[kfTarget.mCamName];

  int nLevelScale = LevelScale(nLevel);
  Candidate& candidate = kfSrc.maLevels[nLevel].vCandidates[nCandidate];
  CVD::ImageRef irLevelPos = candidate.irLevelPos;
  TooN::Vector<2> v2RootPos = LevelZeroPos(irLevelPos, nLevel);  // The pixel coords of the candidate at level zero

  TooN::Vector<3> v3Ray_SC =
    cameraSrc.UnProject(v2RootPos);  // The pixel coords unprojected into a 3d half-line in the source kf frame
  TooN::Vector<3> v3LineDirn_TC =
    kfTarget.mse3CamFromWorld.get_rotation() * (kfSrc.mse3CamFromWorld.get_rotation().inverse() *
        v3Ray_SC);  // The direction of that line in the target kf frame
  TooN::Vector<3> v3CamCenter_TC =
    kfTarget.mse3CamFromWorld *
    kfSrc.mse3CamFromWorld.inverse().get_translation();  // The position of the source kf in the target kf frame
  TooN::Vector<3> v3CamCenter_SC =
    kfSrc.mse3CamFromWorld *
    kfTarget.mse3CamFromWorld.inverse().get_translation();  // The position of the target kf in the source kf frame

  double dMaxEpiAngle = M_PI / 3;  // the maximum angle spanned by two view rays allowed
  double dMinEpiAngle = 0.05;      // the minimum angle allowed

  // Want to figure out the min and max depths allowed on the source ray, which will be determined by the minimum and
  // maximum allowed epipolar angle
  // See diagram below, which shows the min and max epipolar angles.
  /*
   *              /\
   *             / m\
   *            /  i \
   *           /`. n  \
   *          / m `.   \
   *         /  a   `.  \
   *        /   x     `. \
   *       /____________`.\
   *    Source           Target
   */

  double dSeparationDist = norm(v3CamCenter_SC);
  double dSourceAngle =
    acos((v3CamCenter_SC * v3Ray_SC) / dSeparationDist);  // v3Ray_SC is unit length so don't have to divide

  double dMinTargetAngle = M_PI - dSourceAngle - dMaxEpiAngle;
  double dStartDepth = dSeparationDist * sin(dMinTargetAngle) / sin(dMaxEpiAngle);

  double dMaxTargetAngle = M_PI - dSourceAngle - dMinEpiAngle;
  double dEndDepth = dSeparationDist * sin(dMaxTargetAngle) / sin(dMinEpiAngle);

  if (dStartDepth < 0.2)  // don't bother looking too close
    dStartDepth = 0.2;

  ROS_DEBUG_STREAM("dStartDepth: " << dStartDepth << " dEndDepth: " << dEndDepth);

  TooN::Vector<3> v3RayStart_TC =
    v3CamCenter_TC + dStartDepth * v3LineDirn_TC;  // The start of the epipolar line segment in the target kf frame
  TooN::Vector<3> v3RayEnd_TC =
    v3CamCenter_TC + dEndDepth * v3LineDirn_TC;  // The end of the epipolar line segment in the target kf frame

  // Project epipolar line segment start and end points onto unit sphere and check for minimum distance between them
  TooN::Vector<3> v3A = v3RayStart_TC;
  normalize(v3A);
  TooN::Vector<3> v3B = v3RayEnd_TC;
  normalize(v3B);
  TooN::Vector<3> v3BetweenEndpoints = v3A - v3B;

  if (v3BetweenEndpoints * v3BetweenEndpoints < 0.00000001)
  {
    ROS_DEBUG_STREAM("MapMakerServerBase: v3BetweenEndpoints too small.");
    return false;
  }

  // Now we want to construct a bunch of hypothetical point locations, so we can warp the source patch
  // into the target KF and look for a match. To do this, need to partition the epipolar arc in the target
  // KF equally, rather than the source ray equally. The epipolar arc lies at the intersection of the epipolar
  // plane and the unit circle of the target KF. We will construct a matrix that projects 3-vectors onto
  // the epipolar plane, and use it to define the start and stop coordinates of a unit circle by
  // projecting the ray start and ray end vectors. Then it's just a matter of partitioning the unit circle, and
  // projecting each partition point onto the source ray (keeping in mind that the source ray is in the
  // epipolar plane too).

  // Find the normal of the epipolar plane
  TooN::Vector<3> v3PlaneNormal = v3A ^ v3B;
  normalize(v3PlaneNormal);
  TooN::Vector<3> v3PlaneI =
    v3A;  // Lets call the vector we got from the start of the epipolar line segment the new coordinate frame's x axis
  TooN::Vector<3> v3PlaneJ = v3PlaneNormal ^ v3PlaneI;  // Get the y axis

  // This will convert a 3D point to the epipolar plane's coordinate frame
  TooN::Matrix<3> m3ToPlaneCoords;
  m3ToPlaneCoords[0] = v3PlaneI;
  m3ToPlaneCoords[1] = v3PlaneJ;
  m3ToPlaneCoords[2] = v3PlaneNormal;

  TooN::Vector<2> v2PlaneB = (m3ToPlaneCoords * v3B).slice<0, 2>();  // The vector we got from the end of the epipolar
  // line segment, in epipolar plane coordinates
  TooN::Vector<2> v2PlaneI = TooN::makeVector(1, 0);

  double dMaxAngleAlongCircle =
    acos(v2PlaneB * v2PlaneI);  // The angle between point B (now a 2D point in the plane) and the x axis

  // For stepping along the circle
  double dAngleStep = cameraTarget.OnePixelAngle() * LevelScale(nLevel) * 3;
  int nSteps = ceil(dMaxAngleAlongCircle / dAngleStep);
  dAngleStep = dMaxAngleAlongCircle / nSteps;

  TooN::Vector<2> v2RayStartInPlane = (m3ToPlaneCoords * v3RayStart_TC).slice<0, 2>();
  TooN::Vector<2> v2RayEndInPlane = (m3ToPlaneCoords * v3RayEnd_TC).slice<0, 2>();
  TooN::Vector<2> v2RayDirInPlane = v2RayEndInPlane - v2RayStartInPlane;
  normalize(v2RayDirInPlane);

  // First in world frame, second in camera frame
  std::vector<std::pair<TooN::Vector<3>, TooN::Vector<3>>> vMapPointPositions;
  TooN::SE3<> se3WorldFromTargetCam = kfTarget.mse3CamFromWorld.inverse();
  for (int i = 0; i < nSteps + 1; ++i)  // stepping along circle
  {
    double dAngle = i * dAngleStep;                                  // current angle
    TooN::Vector<2> v2CirclePoint = TooN::makeVector(cos(dAngle), sin(dAngle));  // point on circle

    // Backproject onto view ray, this is the depth along view ray where we intersect
    double dAlpha = (v2RayStartInPlane[0] * v2CirclePoint[1] - v2RayStartInPlane[1] * v2CirclePoint[0]) /
                    (v2RayDirInPlane[1] * v2CirclePoint[0] - v2RayDirInPlane[0] * v2CirclePoint[1]);

    TooN::Vector<3> v3PointPos_TC = v3RayStart_TC + dAlpha * v3LineDirn_TC;
    TooN::Vector<3> v3PointPos = se3WorldFromTargetCam * v3PointPos_TC;
    vMapPointPositions.push_back(std::make_pair(v3PointPos, v3PointPos_TC));
  }

  // This will be the map point that we place at the different depths in order to generate warped patches
  MapPoint point;
  point.mpPatchSourceKF = &kfSrc;
  point.mnSourceLevel = nLevel;
  point.mv3Normal_NC = TooN::makeVector(0, 0, -1);
  point.mirCenter = irLevelPos;
  point.mv3Center_NC = cameraSrc.UnProject(v2RootPos);
  point.mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
  point.mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

  normalize(point.mv3Center_NC);
  normalize(point.mv3OneRightFromCenter_NC);
  normalize(point.mv3OneDownFromCenter_NC);

  PatchFinder finder;
  int nMaxZMSSD = finder.mnMaxSSD + 1;
  int nBestZMSSD = nMaxZMSSD;
  int nBest = -1;
  TooN::Vector<2> v2BestMatch = TooN::Zeros;

  std::vector<std::tuple<int, int, TooN::Vector<2>>> vScoresIndicesBestMatches;

  for (unsigned i = 0; i < vMapPointPositions.size(); ++i)  // go through all our hypothesized map points
  {
    point.mv3WorldPos = vMapPointPositions[i].first;
    point.RefreshPixelVectors();

    TooN::Vector<2> v2Image = cameraTarget.Project(vMapPointPositions[i].second);

    if (cameraTarget.Invalid())
      continue;

    if (!kfTarget.maLevels[0].image.in_image(CVD::ir(v2Image)))
      continue;

    // Check if projected point is in a masked portion of the target keyframe
    if (kfTarget.maLevels[0].mask.totalsize() > 0 && kfTarget.maLevels[0].mask[CVD::ir(v2Image)] == 0)
      continue;

    TooN::Matrix<2> m2CamDerivs = cameraTarget.GetProjectionDerivs();

    int nSearchLevel = finder.CalcSearchLevelAndWarpMatrix(point, kfTarget.mse3CamFromWorld, m2CamDerivs);
    if (nSearchLevel == -1)
      continue;

    finder.MakeTemplateCoarseCont(point);

    if (finder.TemplateBad())
      continue;

    int nScore;
    bool bExhaustive =
      false;  // Should we do an exhaustive search of the target area? Should maybe make this into a param
    bool bFound = finder.FindPatchCoarse(CVD::ir(v2Image), kfTarget, 3, nScore, bExhaustive);

    if (!bFound)
      continue;

    vScoresIndicesBestMatches.push_back(std::make_tuple(nScore, i, finder.GetCoarsePosAsVector()));

    if (nScore < nBestZMSSD)
    {
      nBestZMSSD = nScore;
      nBest = i;
      v2BestMatch = finder.GetCoarsePosAsVector();
    }
  }

  if (nBest == -1)
  {
    ROS_DEBUG_STREAM("No match found.");
    return false;
  }

  std::sort(vScoresIndicesBestMatches.begin(), vScoresIndicesBestMatches.end(), compScores);

  // We want matches that are unambiguous, so if there are many good matches along the view ray,
  // we can't say for certain where the best one really is. Therefore, implement the following rule:
  // Best zmssd has to be 10% better than nearest other, unless that nearest other is 1 index away
  // from best

  int nResizeTo = 1;
  for (unsigned i = 1; i < vScoresIndicesBestMatches.size(); ++i)
  {
    if (std::get<0>(vScoresIndicesBestMatches[i]) > nBestZMSSD * 0.9)  // within 10% of best
      nResizeTo++;
  }

  // Too many high scoring points, since the best can be within 10% of at most two other points.
  // We can't be certain of what is best, get out of here
  if (nResizeTo > 3)
    return false;

  vScoresIndicesBestMatches.resize(nResizeTo);  // chop!

  // All the points left in vScoresIndicesBestMatches should be within 1 idx of best, otherwise our matches are ambiguous
  // Test index distance:
  for (unsigned i = 1; i < vScoresIndicesBestMatches.size(); ++i)
  {
    if (abs(std::get<1>(vScoresIndicesBestMatches[i]) - nBest) > 1)  // bad, index too far away, get out of here
      return false;
  }

  // Now all the points in vScoresIndicesBestMatches can be considered potential matches

  TooN::Vector<2> v2SubPixPos = TooN::makeVector(-1, -1);
  bool bGotGoodSubpix = false;
  for (unsigned i = 0; i < vScoresIndicesBestMatches.size(); ++i)  // go through all potential good matches
  {
    int nCurrBest = std::get<1>(vScoresIndicesBestMatches[i]);
    TooN::Vector<2> v2CurrBestMatch = std::get<2>(vScoresIndicesBestMatches[i]);

    point.mv3WorldPos = vMapPointPositions[nCurrBest].first;
    point.RefreshPixelVectors();

    cameraTarget.Project(vMapPointPositions[nCurrBest].second);
    TooN::Matrix<2> m2CamDerivs = cameraTarget.GetProjectionDerivs();

    finder.CalcSearchLevelAndWarpMatrix(point, kfTarget.mse3CamFromWorld, m2CamDerivs);
    finder.MakeTemplateCoarseCont(point);
    finder.SetSubPixPos(v2CurrBestMatch);

    // Try to get subpixel convergence
    bool bSubPixConverges = finder.IterateSubPixToConvergence(kfTarget, 10);

    if (!bSubPixConverges)
      continue;

    // First one to make it here wins. Keep in mind that vScoresIndicesBestMatches is ordered by
    // score, so we're trying the points in descent order of potential
    bGotGoodSubpix = true;
    v2SubPixPos = finder.GetSubPixPos();
    break;
  }

  // None of the candidates had subpix converge? Bad match...
  if (!bGotGoodSubpix)
    return false;

  // Now triangulate the 3d point...
  TooN::Vector<3> v3New;
  v3New = kfTarget.mse3CamFromWorld.inverse() *
          ReprojectPoint(kfSrc.mse3CamFromWorld * kfTarget.mse3CamFromWorld.inverse(), cameraSrc.UnProject(v2RootPos),
                         cameraTarget.UnProject(v2SubPixPos));

  MapPoint* pPointNew = new MapPoint;

  pPointNew->mv3WorldPos = v3New;

  // Patch source stuff:
  pPointNew->mpPatchSourceKF = &kfSrc;
  pPointNew->mnSourceLevel = nLevel;
  pPointNew->mv3Normal_NC = TooN::makeVector(0, 0, -1);
  pPointNew->mirCenter = irLevelPos;
  pPointNew->mv3Center_NC = cameraSrc.UnProject(v2RootPos);
  pPointNew->mv3OneRightFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(nLevelScale, 0)));
  pPointNew->mv3OneDownFromCenter_NC = cameraSrc.UnProject(v2RootPos + vec(CVD::ImageRef(0, nLevelScale)));

  normalize(pPointNew->mv3Center_NC);
  normalize(pPointNew->mv3OneDownFromCenter_NC);
  normalize(pPointNew->mv3OneRightFromCenter_NC);

  pPointNew->RefreshPixelVectors();

  Measurement* pMeasSrc = new Measurement;
  pMeasSrc->eSource = Measurement::SRC_ROOT;
  pMeasSrc->v2RootPos = v2RootPos;
  pMeasSrc->nLevel = nLevel;
  pMeasSrc->bSubPix = true;

  Measurement* pMeasTarget = new Measurement;
  *pMeasTarget = *pMeasSrc;  // copy data
  pMeasTarget->eSource = Measurement::SRC_EPIPOLAR;
  pMeasTarget->v2RootPos = v2SubPixPos;

  // Record map point and its measurement in the right places
  kfSrc.AddMeasurement(pPointNew, pMeasSrc);
  kfTarget.AddMeasurement(pPointNew, pMeasTarget);

  // kfSrc.mmpMeasurements[pPointNew] = pMeasSrc;
  // kfTarget.mmpMeasurements[pPointNew] = pMeasTarget;
  // pPointNew->mMMData.spMeasurementKFs.insert(&kfSrc);
  // pPointNew->mMMData.spMeasurementKFs.insert(&kfTarget);

  mMap.mlpPoints.push_back(pPointNew);
  mlpNewQueue.push_back(pPointNew);

  return true;
}
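The dStartDepth/dEndDepth computation is the law of sines applied to the triangle formed by the two camera centers and a hypothetical point on the source ray: depth / sin(target angle) = separation / sin(epipolar angle), with the target angle equal to pi - sourceAngle - epiAngle. Isolated into a sketch:

#include <cmath>

// Depth along the source view ray at which the epipolar angle at the point
// equals epiAngle, given the camera separation and the angle at the source
// camera between baseline and view ray. Matches the computation above.
double depthForEpipolarAngle(double separationDist, double sourceAngle,
                             double epiAngle) {
    double targetAngle = M_PI - sourceAngle - epiAngle;  // angles sum to pi
    return separationDist * std::sin(targetAngle) / std::sin(epiAngle);
}

With this helper, dStartDepth is depthForEpipolarAngle(dSeparationDist, dSourceAngle, dMaxEpiAngle) and dEndDepth is the same call with dMinEpiAngle: a larger allowed epipolar angle pulls the admissible depth closer to the source camera.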
Code Example #26
 void LoopClosing::CorrectLoop()
 {
     // Send a stop signal to Local Mapping
     // Prevent new keyframes from being inserted while the loop is being corrected
     mpLocalMapper->RequestStop();
     
     // Wait until Local Mapping has effectively stopped
     //ros::Rate r(1e4);
     //while(ros::ok() && !mpLocalMapper->isStopped())
     while(!mpLocalMapper->isStopped())
     {
         //r.sleep();
         boost::this_thread::sleep(boost::posix_time::milliseconds(10000));
     }
     
     // Ensure current keyframe is updated
     mpCurrentKF->UpdateConnections();
     
     // Retrieve keyframes connected to the current keyframe and compute corrected Sim3 pose by propagation
     mvpCurrentConnectedKFs = mpCurrentKF->GetVectorCovisibleKeyFrames();
     mvpCurrentConnectedKFs.push_back(mpCurrentKF);
     
     KeyFrameAndPose CorrectedSim3, NonCorrectedSim3;
     CorrectedSim3[mpCurrentKF]=mg2oScw;
     cv::Mat Twc = mpCurrentKF->GetPoseInverse();
     
     
     for(vector<KeyFrame*>::iterator vit=mvpCurrentConnectedKFs.begin(), vend=mvpCurrentConnectedKFs.end(); vit!=vend; vit++)
     {
         KeyFrame* pKFi = *vit;
         
         cv::Mat Tiw = pKFi->GetPose();
         
         if(pKFi!=mpCurrentKF)
         {
             cv::Mat Tic = Tiw*Twc;
             cv::Mat Ric = Tic.rowRange(0,3).colRange(0,3);
             cv::Mat tic = Tic.rowRange(0,3).col(3);
             g2o::Sim3 g2oSic(Converter::toMatrix3d(Ric),Converter::toVector3d(tic),1.0);
             g2o::Sim3 g2oCorrectedSiw = g2oSic*mg2oScw;
             //Pose corrected with the Sim3 of the loop closure
             CorrectedSim3[pKFi]=g2oCorrectedSiw;
         }
         
         cv::Mat Riw = Tiw.rowRange(0,3).colRange(0,3);
         cv::Mat tiw = Tiw.rowRange(0,3).col(3);
         g2o::Sim3 g2oSiw(Converter::toMatrix3d(Riw),Converter::toVector3d(tiw),1.0);
         //Pose without correction
         NonCorrectedSim3[pKFi]=g2oSiw;
     }
     
     // Correct all MapPoints observed by current keyframe and neighbors, so that they align with the other side of the loop
     for(KeyFrameAndPose::iterator mit=CorrectedSim3.begin(), mend=CorrectedSim3.end(); mit!=mend; mit++)
     {
         KeyFrame* pKFi = mit->first;
         g2o::Sim3 g2oCorrectedSiw = mit->second;
         g2o::Sim3 g2oCorrectedSwi = g2oCorrectedSiw.inverse();
         
         g2o::Sim3 g2oSiw =NonCorrectedSim3[pKFi];
         
         vector<MapPoint*> vpMPsi = pKFi->GetMapPointMatches();
         for(size_t iMP=0, endMPi = vpMPsi.size(); iMP<endMPi; iMP++)
         {
             MapPoint* pMPi = vpMPsi[iMP];
             if(!pMPi)
                 continue;
             if(pMPi->isBad())
                 continue;
             if(pMPi->mnCorrectedByKF==mpCurrentKF->mnId)
                 continue;
             
             // Project with non-corrected pose and project back with corrected pose
             cv::Mat P3Dw = pMPi->GetWorldPos();
             Eigen::Matrix<double,3,1> eigP3Dw = Converter::toVector3d(P3Dw);
             Eigen::Matrix<double,3,1> eigCorrectedP3Dw = g2oCorrectedSwi.map(g2oSiw.map(eigP3Dw));
             
             cv::Mat cvCorrectedP3Dw = Converter::toCvMat(eigCorrectedP3Dw);
             pMPi->SetWorldPos(cvCorrectedP3Dw);
             pMPi->mnCorrectedByKF = mpCurrentKF->mnId;
             pMPi->mnCorrectedReference = pKFi->mnId;
             pMPi->UpdateNormalAndDepth();
         }
         
         // Update keyframe pose with corrected Sim3. First transform Sim3 to SE3 (scale translation)
         Eigen::Matrix3d eigR = g2oCorrectedSiw.rotation().toRotationMatrix();
         Eigen::Vector3d eigt = g2oCorrectedSiw.translation();
         double s = g2oCorrectedSiw.scale();
         
         eigt *=(1./s); //[R t/s;0 1]
         
         cv::Mat correctedTiw = Converter::toCvSE3(eigR,eigt);
         
         pKFi->SetPose(correctedTiw);
         
         // Make sure connections are updated
         pKFi->UpdateConnections();
     }
     
     // Start Loop Fusion
     // Update matched map points and replace if duplicated
     for(size_t i=0; i<mvpCurrentMatchedPoints.size(); i++)
     {
         if(mvpCurrentMatchedPoints[i])
         {
             MapPoint* pLoopMP = mvpCurrentMatchedPoints[i];
             MapPoint* pCurMP = mpCurrentKF->GetMapPoint(i);
             if(pCurMP)
                 pCurMP->Replace(pLoopMP);
             else
             {
                 mpCurrentKF->AddMapPoint(pLoopMP,i);
                 pLoopMP->AddObservation(mpCurrentKF,i);
                 pLoopMP->ComputeDistinctiveDescriptors();
             }
         }
     }
     
     // Project MapPoints observed in the neighborhood of the loop keyframe
     // into the current keyframe and neighbors using corrected poses.
     // Fuse duplications.
     SearchAndFuse(CorrectedSim3);
     
     
      // After the MapPoint fusion, new links appear in the covisibility graph, attaching both sides of the loop
     map<KeyFrame*, set<KeyFrame*> > LoopConnections;
     
     for(vector<KeyFrame*>::iterator vit=mvpCurrentConnectedKFs.begin(), vend=mvpCurrentConnectedKFs.end(); vit!=vend; vit++)
     {
         KeyFrame* pKFi = *vit;
         vector<KeyFrame*> vpPreviousNeighbors = pKFi->GetVectorCovisibleKeyFrames();
         
         // Update connections. Detect new links.
         pKFi->UpdateConnections();
         LoopConnections[pKFi]=pKFi->GetConnectedKeyFrames();
         for(vector<KeyFrame*>::iterator vit_prev=vpPreviousNeighbors.begin(), vend_prev=vpPreviousNeighbors.end(); vit_prev!=vend_prev; vit_prev++)
         {
             LoopConnections[pKFi].erase(*vit_prev);
         }
         for(vector<KeyFrame*>::iterator vit2=mvpCurrentConnectedKFs.begin(), vend2=mvpCurrentConnectedKFs.end(); vit2!=vend2; vit2++)
         {
             LoopConnections[pKFi].erase(*vit2);
         }
     }
     
     mpTracker->ForceRelocalisation();
     
     Optimizer::OptimizeEssentialGraph(mpMap, mpMatchedKF, mpCurrentKF,  mg2oScw, NonCorrectedSim3, CorrectedSim3, LoopConnections);
     
     //Add edge
     mpMatchedKF->AddLoopEdge(mpCurrentKF);
     mpCurrentKF->AddLoopEdge(mpMatchedKF);
     
     std::cout << "Loop Closed!" << std::endl;
     
     // Loop closed. Release Local Mapping.
     mpLocalMapper->Release();
     
     mpMap->SetFlagAfterBA();
     
     mLastLoopKFid = mpCurrentKF->mnId;
 }
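The correction loop above uses two Sim3 operations: a map point is corrected by sending it into the camera frame with the old pose (g2oSiw.map) and back to the world with the corrected inverse (g2oCorrectedSwi.map), and the corrected Sim3 [sR t; 0 1] becomes an SE3 by keeping R and dividing t by s. Below is a minimal self-contained sketch of these operations; this Sim3 struct is an illustrative stand-in that mimics g2o::Sim3, not its actual interface.

#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<std::array<double, 3>, 3>;

// Minimal similarity transform: p' = s * R * p + t
struct Sim3 {
    Mat3 R; Vec3 t; double s;

    Vec3 map(const Vec3& p) const {
        Vec3 q{};
        for (int i = 0; i < 3; i++)
            q[i] = s * (R[i][0]*p[0] + R[i][1]*p[1] + R[i][2]*p[2]) + t[i];
        return q;
    }
    // Inverse: s' = 1/s, R' = R^T, t' = -(1/s) * R^T * t
    Sim3 inverse() const {
        Sim3 v; v.s = 1.0 / s;
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 3; j++)
                v.R[i][j] = R[j][i];
        for (int i = 0; i < 3; i++)
            v.t[i] = -v.s * (R[0][i]*t[0] + R[1][i]*t[1] + R[2][i]*t[2]);
        return v;
    }
};

// Composition: (a*b).map(p) == a.map(b.map(p)), as used for g2oSic * mg2oScw
Sim3 operator*(const Sim3& a, const Sim3& b) {
    Sim3 c; c.s = a.s * b.s;
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++)
            c.R[i][j] = a.R[i][0]*b.R[0][j] + a.R[i][1]*b.R[1][j] + a.R[i][2]*b.R[2][j];
        c.t[i] = a.s * (a.R[i][0]*b.t[0] + a.R[i][1]*b.t[1] + a.R[i][2]*b.t[2]) + a.t[i];
    }
    return c;
}

int main() {
    Sim3 Siw;                          // old (drifted) world->camera pose; identity R for the demo
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            Siw.R[i][j] = (i == j) ? 1.0 : 0.0;
    Siw.t = {0.5, 0.0, 0.0}; Siw.s = 1.0;

    Sim3 SiwCorr = Siw;                // corrected pose absorbs the drift in its scale
    SiwCorr.t = {0.4, 0.0, 0.0}; SiwCorr.s = 1.1;

    Vec3 p{1.0, 2.0, 3.0};
    // As in CorrectLoop: project with the old pose, back-project with the corrected one
    Vec3 pCorr = SiwCorr.inverse().map(Siw.map(p));
    // Sim3 -> SE3 as in the listing: keep R, use t/s ("[R t/s; 0 1]")
    printf("corrected point: %.3f %.3f %.3f (SE3 tx = %.3f)\n",
           pCorr[0], pCorr[1], pCorr[2], SiwCorr.t[0] / SiwCorr.s);
    return 0;
}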
コード例 #27
0
 bool LoopClosing::ComputeSim3()
 {
     // For each consistent loop candidate we try to compute a Sim3
     
     const int nInitialCandidates = mvpEnoughConsistentCandidates.size();
     
      // We first compute ORB matches for each candidate
      // If enough matches are found, we set up a Sim3Solver
     ORBmatcher matcher(0.75,true);
     
     vector<Sim3Solver*> vpSim3Solvers;
     vpSim3Solvers.resize(nInitialCandidates);
     
     vector<vector<MapPoint*> > vvpMapPointMatches;
     vvpMapPointMatches.resize(nInitialCandidates);
     
     vector<bool> vbDiscarded;
     vbDiscarded.resize(nInitialCandidates);
     
     int nCandidates=0; //candidates with enough matches
     
     for(int i=0; i<nInitialCandidates; i++)
     {
         KeyFrame* pKF = mvpEnoughConsistentCandidates[i];
         
        // prevent Local Mapping from erasing it while it is being processed in this thread
         pKF->SetNotErase();
         
         if(pKF->isBad())
         {
             vbDiscarded[i] = true;
             continue;
         }
         
         int nmatches = matcher.SearchByBoW(mpCurrentKF,pKF,vvpMapPointMatches[i]);
         
         if(nmatches<20)
         {
             vbDiscarded[i] = true;
             continue;
         }
         else
         {
             Sim3Solver* pSolver = new Sim3Solver(mpCurrentKF,pKF,vvpMapPointMatches[i]);
             pSolver->SetRansacParameters(0.99,20,300);
             vpSim3Solvers[i] = pSolver;
         }
         
         nCandidates++;
     }
     
     bool bMatch = false;
     
      // Alternately perform RANSAC iterations on each candidate
      // until one succeeds or all of them fail
     while(nCandidates>0 && !bMatch)
     {
         for(int i=0; i<nInitialCandidates; i++)
         {
             if(vbDiscarded[i])
                 continue;
             
             KeyFrame* pKF = mvpEnoughConsistentCandidates[i];
             
              // Perform 5 RANSAC iterations
             vector<bool> vbInliers;
             int nInliers;
             bool bNoMore;
             
             Sim3Solver* pSolver = vpSim3Solvers[i];
             cv::Mat Scm  = pSolver->iterate(5,bNoMore,vbInliers,nInliers);
             
              // If RANSAC reaches its max. number of iterations, discard the keyframe
             if(bNoMore)
             {
                 vbDiscarded[i]=true;
                 nCandidates--;
             }
             
             // If RANSAC returns a Sim3, perform a guided matching and optimize with all correspondences
             if(!Scm.empty())
             {
                 vector<MapPoint*> vpMapPointMatches(vvpMapPointMatches[i].size(), static_cast<MapPoint*>(NULL));
                 for(size_t j=0, jend=vbInliers.size(); j<jend; j++)
                 {
                     if(vbInliers[j])
                         vpMapPointMatches[j]=vvpMapPointMatches[i][j];
                 }
                 
                 cv::Mat R = pSolver->GetEstimatedRotation();
                 cv::Mat t = pSolver->GetEstimatedTranslation();
                 const float s = pSolver->GetEstimatedScale();
                 matcher.SearchBySim3(mpCurrentKF,pKF,vpMapPointMatches,s,R,t,7.5);
                 
                 
                 g2o::Sim3 gScm(Converter::toMatrix3d(R),Converter::toVector3d(t),s);
                  const int nOptInliers = Optimizer::OptimizeSim3(mpCurrentKF, pKF, vpMapPointMatches, gScm, 10);
                  
                  // If the optimization finds enough inliers, stop the RANSAC loops and continue
                  if(nOptInliers>=20)
                 {
                     bMatch = true;
                     mpMatchedKF = pKF;
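                      // gSmw is the world->matched-KF pose (scale 1); composing it with the
                      // solver's matched->current Sim3 yields the corrected world->current Sim3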
                     g2o::Sim3 gSmw(Converter::toMatrix3d(pKF->GetRotation()),Converter::toVector3d(pKF->GetTranslation()),1.0);
                     mg2oScw = gScm*gSmw;
                     mScw = Converter::toCvMat(mg2oScw);
                     
                     mvpCurrentMatchedPoints = vpMapPointMatches;
                     break;
                 }
             }
         }
     }
     
     if(!bMatch)
     {
         for(int i=0; i<nInitialCandidates; i++)
             mvpEnoughConsistentCandidates[i]->SetErase();
         mpCurrentKF->SetErase();
         return false;
     }
     
     // Retrieve MapPoints seen in Loop Keyframe and neighbors
     vector<KeyFrame*> vpLoopConnectedKFs = mpMatchedKF->GetVectorCovisibleKeyFrames();
     vpLoopConnectedKFs.push_back(mpMatchedKF);
     mvpLoopMapPoints.clear();
     for(vector<KeyFrame*>::iterator vit=vpLoopConnectedKFs.begin(); vit!=vpLoopConnectedKFs.end(); vit++)
     {
         KeyFrame* pKF = *vit;
         vector<MapPoint*> vpMapPoints = pKF->GetMapPointMatches();
         for(size_t i=0, iend=vpMapPoints.size(); i<iend; i++)
         {
             MapPoint* pMP = vpMapPoints[i];
             if(pMP)
             {
                 if(!pMP->isBad() && pMP->mnLoopPointForKF!=mpCurrentKF->mnId)
                 {
                     mvpLoopMapPoints.push_back(pMP);
                     pMP->mnLoopPointForKF=mpCurrentKF->mnId;
                 }
             }
         }
     }
     
     // Find more matches projecting with the computed Sim3
     matcher.SearchByProjection(mpCurrentKF, mScw, mvpLoopMapPoints, mvpCurrentMatchedPoints,10);
     
      // If there are enough matches, accept the loop
     int nTotalMatches = 0;
     for(size_t i=0; i<mvpCurrentMatchedPoints.size(); i++)
     {
         if(mvpCurrentMatchedPoints[i])
             nTotalMatches++;
     }
     
     if(nTotalMatches>=40)
     {
         for(int i=0; i<nInitialCandidates; i++)
             if(mvpEnoughConsistentCandidates[i]!=mpMatchedKF)
                 mvpEnoughConsistentCandidates[i]->SetErase();
         return true;
     }
     else
     {
         for(int i=0; i<nInitialCandidates; i++)
             mvpEnoughConsistentCandidates[i]->SetErase();
         mpCurrentKF->SetErase();
         return false;
     }
     
 }
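Note the scheduling in the while loop above: instead of exhausting one candidate's full 300-iteration RANSAC budget before trying the next, each surviving candidate gets a 5-iteration slice per pass, so an easy candidate can succeed before hard ones waste their budgets. A stripped-down sketch of that control flow, with a hypothetical Solver type standing in for Sim3Solver:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for Sim3Solver: iterate() runs up to n RANSAC
// iterations, returns true on success, and sets noMore once the total
// iteration budget is exhausted.
struct Solver {
    int done = 0;
    int budget = 300;
    bool iterate(int n, bool& noMore) {
        done += n;
        noMore = (done >= budget);
        return false;   // a real solver returns true when a model is found
    }
};

// Hand out small iteration slices in round-robin order until one candidate
// succeeds or every candidate is discarded; returns the winner's index or -1.
int roundRobinRansac(std::vector<Solver>& solvers) {
    std::vector<bool> discarded(solvers.size(), false);
    int alive = (int)solvers.size();
    while (alive > 0) {
        for (std::size_t i = 0; i < solvers.size(); i++) {
            if (discarded[i])
                continue;
            bool noMore = false;
            if (solvers[i].iterate(5, noMore))   // 5 iterations per slice, as above
                return (int)i;
            if (noMore) {
                discarded[i] = true;
                alive--;
            }
        }
    }
    return -1;   // all candidates failed
}

int main() {
    std::vector<Solver> solvers(3);
    return roundRobinRansac(solvers) >= 0 ? 0 : 1;   // demo: all fail -> 1
}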
コード例 #28
0
ファイル: SL_SingleSLAM.cpp プロジェクト: danping/CoSLAM
int SingleSLAM::newMapPoints(std::vector<MapPoint*>& mapPts, double maxEpiErr,
		double maxNcc) {
	std::vector<FeaturePoint*> vecFeatPts;
	getUnMappedAndTrackedFeatPts(vecFeatPts, 0, Param::nMinFeatTrkLen);

	mapPts.clear();
	mapPts.reserve(4096);

	double M[3], m1[2], m2[2];

	//reconstruct 3D map points
	int numRecons = 0;
	for (size_t k = 0; k < vecFeatPts.size(); k++) {
		FeaturePoint* cur_fp = vecFeatPts[k];
		FeaturePoint* pre_fp = cur_fp;
		while (pre_fp->preFrame && pre_fp->preFrame->cam) {
			if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC) {
				break;
			}
			pre_fp = pre_fp->preFrame;
		}
		if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC || !pre_fp->cam)
			continue;

		normPoint(iK.data, pre_fp->m, m1);
		normPoint(iK.data, cur_fp->m, m2);

		//triangulate the two feature points to get the 3D point
		binTriangulate(pre_fp->cam->R, pre_fp->cam->t, cur_fp->cam->R,
				cur_fp->cam->t, m1, m2, M);

		if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M))
			continue;

		double cov[9], org[3];
		getBinTriangulateCovMat(K.data, pre_fp->cam->R, pre_fp->cam->t, K.data,
				cur_fp->cam->R, cur_fp->cam->t, M, cov, Const::PIXEL_ERR_VAR);
		getCameraCenter(cur_fp->cam->R, cur_fp->cam->t, org);
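		// discard points whose distance to the camera center is within the triangulation
		// uncertainty (square root of the covariance trace): their depth is indistinguishable from noise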
		double s = fabs(cov[0] + cov[4] + cov[8]);
		if (dist3(org, M) < sqrt(s))
			continue;

		//check the reprojection error
		double err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t,
				M, pre_fp->m);
		double err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t,
				M, cur_fp->m);

		if (err1 < maxEpiErr && err2 < maxEpiErr) {
			//a new map point is generated
			refineTriangulation(cur_fp, M, cov);
			err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t, M,
					pre_fp->m);
			err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t, M,
					cur_fp->m);
			if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M)
					|| isAtCameraBack(pre_fp->cam->R, pre_fp->cam->t, M))
				continue;
			if (err1 < maxEpiErr && err2 < maxEpiErr) {
				MapPoint* pM = new MapPoint(M[0], M[1], M[2], pre_fp->f);
				doubleArrCopy(pM->cov, 0, cov, 9);
				mapPts.push_back(pM);
				pM->lastFrame = cur_fp->f;
				for (FeaturePoint* p = cur_fp; p; p = p->preFrame)
					p->mpt = pM;

				//add the feature point
				pM->addFeature(camId, cur_fp);
				pM->setLocalStatic();

				//compute the NCC block
				pM->nccBlks[camId].computeScaled(m_lastKeyPos->imgSmall,
						m_lastKeyPos->imgScale, cur_fp->x, cur_fp->y);

				int x = max(0, min((int) cur_fp->x, m_rgb.w - 1));
				int y = max(0, min((int) cur_fp->y, m_rgb.h - 1));
				pM->setColor(m_rgb(x, y));
				numRecons++;
			}
		}
	}
	return numRecons;
}
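The geometric core of newMapPoints() is the two-view intersection performed by binTriangulate() followed by the reprojection check. As a rough illustration (not CoSLAM's actual implementation), the midpoint method below triangulates a point as the midpoint of the closest approach between the two back-projected rays, assuming poses of the form x_cam = R*x_world + t and image points already normalized as by normPoint():

#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<std::array<double, 3>, 3>;

static double dot(const Vec3& a, const Vec3& b) {
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}

// Camera center c = -R^T t and world-frame ray direction d = R^T [mx my 1]^T
// for a normalized image point (mx, my), given x_cam = R x_world + t.
static void rayFromCamera(const Mat3& R, const Vec3& t, double mx, double my,
                          Vec3& c, Vec3& d) {
    Vec3 m{mx, my, 1.0};
    for (int i = 0; i < 3; i++) {
        c[i] = -(R[0][i]*t[0] + R[1][i]*t[1] + R[2][i]*t[2]);
        d[i] =   R[0][i]*m[0] + R[1][i]*m[1] + R[2][i]*m[2];
    }
}

// Midpoint triangulation: the point halfway between the closest points
// of the two viewing rays c1 + s1*d1 and c2 + s2*d2.
static Vec3 triangulateMidpoint(const Vec3& c1, const Vec3& d1,
                                const Vec3& c2, const Vec3& d2) {
    Vec3 r{c2[0]-c1[0], c2[1]-c1[1], c2[2]-c1[2]};
    double a = dot(d1, d1), b = dot(d1, d2), c = dot(d2, d2);
    double d = dot(d1, r),  e = dot(d2, r);
    double den = a*c - b*b;              // ~0 only for (near-)parallel rays
    double s1 = (c*d - b*e) / den;
    double s2 = (b*d - a*e) / den;
    return {0.5*(c1[0] + s1*d1[0] + c2[0] + s2*d2[0]),
            0.5*(c1[1] + s1*d1[1] + c2[1] + s2*d2[1]),
            0.5*(c1[2] + s1*d1[2] + c2[2] + s2*d2[2])};
}

int main() {
    Mat3 R{};                            // identity rotation for both cameras
    for (int i = 0; i < 3; i++) R[i][i] = 1.0;
    Vec3 t1{0, 0, 0}, t2{-1, 0, 0};      // second camera shifted 1 unit along x
    Vec3 c1, d1, c2, d2;
    // The world point (0,0,2) projects to (0,0) in cam 1 and (-0.5,0) in cam 2
    rayFromCamera(R, t1, 0.0, 0.0, c1, d1);
    rayFromCamera(R, t2, -0.5, 0.0, c2, d2);
    Vec3 M = triangulateMidpoint(c1, d1, c2, d2);
    printf("triangulated: %.3f %.3f %.3f\n", M[0], M[1], M[2]);
    return 0;
}

A cheirality check equivalent to isAtCameraBack() then amounts to testing that the z-component of R*M + t is positive in both views.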
コード例 #29
0
void nofAttacker::MissAttackingWalk()
{
    // Has our home building been destroyed in the meantime?
    if(!building)
    {
        // Then notify the target, if it still exists (it may have happened to be destroyed at the same time)
        InformTargetsAboutCancelling();

        // Notify the ship if necessary (ship attackers)
        if(ship_obj_id)
            CancelAtShip();

        // Wander around aimlessly
        state = STATE_FIGUREWORK;
        StartWandering();
        Wander();

        return;
    }

    // Does the target even exist anymore?
    if(!attacked_goal)
    {
        ReturnHomeMissionAttacking();
        return;
    }

    /*// Is it still a hostile destination?
    // (Could be captured in the meantime)
    if(!players->getElement(player)->IsPlayerAttackable(attacked_goal->GetPlayer()))
    {
        ReturnHomeMissionAttacking();
        return;
    }*/

    // Look for a spot around the military building
    MapPoint goal = attacked_goal->FindAnAttackerPlace(radius, this);

    // No spot left?
    if(!goal.isValid())
    {
        // Then go home
        ReturnHomeMissionAttacking();
        return;
    }

    // Are we perhaps already there?
    if(pos == goal)
    {
        ReachedDestination();
        return;
    }

    // Find all sorts of enemies (attackers, aggressive defenders..) nearby
    if(FindEnemiesNearby())
        // Enemy found -> abort, because nofActiveSoldier handles all things now
        return;

    // No enemy yet?
    // Could a new aggressive defender still come at us?
    TryToOrderAggressiveDefender();

    // Otherwise, search for a path to the goal
    unsigned char dir = gwg->FindHumanPath(pos, goal, MAX_ATTACKING_RUN_DISTANCE, true);
    // None found? Go home
    if(dir == 0xff)
    {
        ReturnHomeMissionAttacking();
        return;
    }

    // Start walking
    StartWalking(Direction(dir));
}
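gwg->FindHumanPath() returns only the direction of the first step toward the goal, or 0xff when no route exists within MAX_ATTACKING_RUN_DISTANCE; the soldier re-plans from each node it reaches. The toy breadth-first search below illustrates that "first direction or 0xff" contract on a 4-connected grid (the real game path-finds on a hex grid with terrain rules):

#include <cstdint>
#include <queue>
#include <utility>
#include <vector>

// Toy stand-in for a path query like gwg->FindHumanPath(): BFS on a
// 4-connected grid, returning the direction (0..3) of the first step
// toward the goal, or 0xff if it is unreachable within maxLen steps.
uint8_t findFirstStep(const std::vector<std::vector<bool>>& walkable,
                      int sx, int sy, int gx, int gy, int maxLen) {
    const int h = (int)walkable.size(), w = (int)walkable[0].size();
    const int dx[4] = {1, -1, 0, 0}, dy[4] = {0, 0, 1, -1};
    std::vector<std::vector<uint8_t>> firstDir(h, std::vector<uint8_t>(w, 0xff));
    std::vector<std::vector<int>> dist(h, std::vector<int>(w, -1));
    std::queue<std::pair<int, int>> q;
    dist[sy][sx] = 0;
    q.push({sx, sy});
    while (!q.empty()) {
        auto [x, y] = q.front();
        q.pop();
        if (x == gx && y == gy)
            return firstDir[y][x];             // direction of the very first move
        if (dist[y][x] >= maxLen)
            continue;
        for (int d = 0; d < 4; d++) {
            int nx = x + dx[d], ny = y + dy[d];
            if (nx < 0 || ny < 0 || nx >= w || ny >= h) continue;
            if (!walkable[ny][nx] || dist[ny][nx] != -1) continue;
            dist[ny][nx] = dist[y][x] + 1;
            // Remember the first move that led here: set it when leaving the
            // start cell, otherwise inherit it from the predecessor.
            firstDir[ny][nx] = (x == sx && y == sy) ? (uint8_t)d : firstDir[y][x];
            q.push({nx, ny});
        }
    }
    return 0xff;                               // no path: the caller goes home
}

int main() {
    std::vector<std::vector<bool>> grid(5, std::vector<bool>(5, true));
    grid[1][1] = false; grid[1][2] = false; grid[1][3] = false;   // a wall to walk around
    return findFirstStep(grid, 0, 0, 4, 4, 20) == 0xff ? 1 : 0;
}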
コード例 #30
0
 void LocalMapping::SearchInNeighbors()
 {
     // Retrieve neighbor keyframes
     vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(20);
     vector<KeyFrame*> vpTargetKFs;
     for(vector<KeyFrame*>::iterator vit=vpNeighKFs.begin(), vend=vpNeighKFs.end(); vit!=vend; vit++)
     {
         KeyFrame* pKFi = *vit;
         if(pKFi->isBad() || pKFi->mnFuseTargetForKF == mpCurrentKeyFrame->mnId)
             continue;
         vpTargetKFs.push_back(pKFi);
         pKFi->mnFuseTargetForKF = mpCurrentKeyFrame->mnId;
         
         // Extend to some second neighbors
         vector<KeyFrame*> vpSecondNeighKFs = pKFi->GetBestCovisibilityKeyFrames(5);
         for(vector<KeyFrame*>::iterator vit2=vpSecondNeighKFs.begin(), vend2=vpSecondNeighKFs.end(); vit2!=vend2; vit2++)
         {
             KeyFrame* pKFi2 = *vit2;
             if(pKFi2->isBad() || pKFi2->mnFuseTargetForKF==mpCurrentKeyFrame->mnId || pKFi2->mnId==mpCurrentKeyFrame->mnId)
                 continue;
             vpTargetKFs.push_back(pKFi2);
         }
     }
     
     
     // Search matches by projection from current KF in target KFs
     ORBmatcher matcher(0.6);
     vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
     for(vector<KeyFrame*>::iterator vit=vpTargetKFs.begin(), vend=vpTargetKFs.end(); vit!=vend; vit++)
     {
         KeyFrame* pKFi = *vit;
         
         matcher.Fuse(pKFi,vpMapPointMatches);
     }
     
     // Search matches by projection from target KFs in current KF
     vector<MapPoint*> vpFuseCandidates;
     vpFuseCandidates.reserve(vpTargetKFs.size()*vpMapPointMatches.size());
     
     for(vector<KeyFrame*>::iterator vitKF=vpTargetKFs.begin(), vendKF=vpTargetKFs.end(); vitKF!=vendKF; vitKF++)
     {
         KeyFrame* pKFi = *vitKF;
         
         vector<MapPoint*> vpMapPointsKFi = pKFi->GetMapPointMatches();
         
         for(vector<MapPoint*>::iterator vitMP=vpMapPointsKFi.begin(), vendMP=vpMapPointsKFi.end(); vitMP!=vendMP; vitMP++)
         {
             MapPoint* pMP = *vitMP;
             if(!pMP)
                 continue;
             if(pMP->isBad() || pMP->mnFuseCandidateForKF == mpCurrentKeyFrame->mnId)
                 continue;
             pMP->mnFuseCandidateForKF = mpCurrentKeyFrame->mnId;
             vpFuseCandidates.push_back(pMP);
         }
     }
     
     matcher.Fuse(mpCurrentKeyFrame,vpFuseCandidates);
     
     
     // Update points
     vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
     for(size_t i=0, iend=vpMapPointMatches.size(); i<iend; i++)
     {
         MapPoint* pMP=vpMapPointMatches[i];
         if(pMP)
         {
             if(!pMP->isBad())
             {
                 pMP->ComputeDistinctiveDescriptors();
                 pMP->UpdateNormalAndDepth();
             }
         }
     }
     
     // Update connections in covisibility graph
     mpCurrentKeyFrame->UpdateConnections();
 }
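Note the mnFuseTargetForKF / mnFuseCandidateForKF fields above: instead of keeping a set of visited objects per query, each object stores the id of the last keyframe that touched it, so "already collected?" becomes a single integer comparison. A generic sketch of this id-stamping dedup idiom (all names are illustrative):

#include <cstdint>
#include <vector>

// Each item remembers the id of the last query that visited it, so a
// "have I already collected this?" test is one integer compare instead of
// a set lookup (the role mnFuseCandidateForKF plays above).
struct Item {
    uint64_t lastVisitId = 0;   // hypothetical counterpart of mnFuseCandidateForKF
};

std::vector<Item*> collectUnique(const std::vector<std::vector<Item*>>& groups,
                                 uint64_t queryId) {
    std::vector<Item*> out;
    for (const auto& group : groups)
        for (Item* it : group) {
            if (!it || it->lastVisitId == queryId)
                continue;               // null entry or already stamped this query
            it->lastVisitId = queryId;  // stamp with the current query id
            out.push_back(it);
        }
    return out;
}

int main() {
    Item a, b;
    std::vector<std::vector<Item*>> groups = {{&a, &b, nullptr}, {&a}};
    return collectUnique(groups, 1).size() == 2 ? 0 : 1;   // 'a' is collected once
}

Stamping mutates the shared objects, so it assumes each pass uses a fresh query id and that no concurrent pass reuses the same field.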