// Updates the accumulated odometry pose from new sensor data.
// Returns the updated pose, or a null Transform when the inter-frame
// transform could not be computed (odometry lost).
Transform Odometry::process(const SensorData & data, OdometryInfo * info)
{
	if(_pose.isNull())
	{
		_pose.setIdentity(); // initialized
	}

	UASSERT(!data.image().empty());
	// Only the monocular variant can work without a depth/right image.
	if(dynamic_cast<OdometryMono*>(this) == 0)
	{
		UASSERT(!data.depthOrRightImage().empty());
	}

	// Valid intrinsics are mandatory: images are expected to be rectified.
	if(data.fx() <= 0 || data.fyOrBaseline() <= 0)
	{
		UERROR("Rectified images required! Calibrate your camera. (fx=%f, fy/baseline=%f, cx=%f, cy=%f)",
				data.fx(), data.fyOrBaseline(), data.cx(), data.cy());
		return Transform();
	}

	UTimer time;
	// The actual motion estimation is implemented by the derived class.
	Transform t = this->computeTransform(data, info);
	if(info)
	{
		info->time = time.elapsed();
		info->lost = t.isNull();
	}

	if(!t.isNull())
	{
		// Success: re-arm the automatic-reset countdown.
		_resetCurrentCount = _resetCountdown;

		if(_force2D)
		{
			// Project the increment on the XY plane (keep x, y, yaw only).
			float x,y,z, roll,pitch,yaw;
			t.getTranslationAndEulerAngles(x, y, z, roll, pitch, yaw);
			t = Transform(x,y,0, 0,0,yaw);
		}

		return _pose *= t; // updated
	}
	else if(_resetCurrentCount > 0)
	{
		// Lost: count down, and reset odometry to the last good pose when
		// the countdown reaches zero.
		UWARN("Odometry lost! Odometry will be reset after next %d consecutive unsuccessful odometry updates...", _resetCurrentCount);

		--_resetCurrentCount;
		if(_resetCurrentCount == 0)
		{
			UWARN("Odometry automatically reset to latest pose!");
			this->reset(_pose);
		}
	}
	return Transform();
}
// Stores user data on this sensor data object. Existing user data is never
// overwritten: a warning is emitted and the call is a no-op in that case.
// A CV_8UC1 matrix is assumed to be already-compressed bytes; any other
// type is kept raw and a compressed copy is cached alongside.
void SensorData::setUserData(const cv::Mat & userData)
{
	const bool hasExisting = !_userDataCompressed.empty() || !_userDataRaw.empty();
	if(!userData.empty() && hasExisting)
	{
		UWARN("Cannot write new user data (%d bytes) over existing user "
			"data (%d bytes, %d compressed). Set user data of %d to null "
			"before setting a new one.",
			int(userData.total()*userData.elemSize()),
			int(_userDataRaw.total()*_userDataRaw.elemSize()),
			_userDataCompressed.cols,
			this->id());
		return;
	}

	// Clear previous state, then store the new data (if any).
	_userDataRaw = cv::Mat();
	_userDataCompressed = cv::Mat();
	if(userData.empty())
	{
		return;
	}
	if(userData.type() == CV_8UC1) // Bytes
	{
		// Single-channel byte matrix: assume compressed already.
		_userDataCompressed = userData;
	}
	else
	{
		_userDataRaw = userData;
		_userDataCompressed = compressData2(userData);
	}
}
// Thread loop: reads the next entry from the database and republishes it
// as an odometry or camera event; stops the thread when the data runs out.
void DBReader::mainLoop()
{
	SensorData data = this->getNextData();
	if(!data.isValid())
	{
		// End of database: kill the thread and signal listeners once.
		if(!this->isKilled())
		{
			UINFO("no more images...");
			this->kill();
			this->post(new CameraEvent());
		}
		return;
	}

	if(_odometryIgnored)
	{
		this->post(new CameraEvent(data));
	}
	else
	{
		if(data.pose().isNull())
		{
			UWARN("Reading the database: odometry is null! "
				"Please set \"Ignore odometry = true\" if there is "
				"no odometry in the database.");
		}
		this->post(new OdometryEvent(data));
	}
}
void DBDriver::getNodeIdByLabel(const std::string & label, int & id) const { if(!label.empty()) { int idFound = 0; // look in the trash _trashesMutex.lock(); for(std::map<int, Signature*>::const_iterator sIter = _trashSignatures.begin(); sIter!=_trashSignatures.end(); ++sIter) { if(sIter->second->getLabel().compare(label) == 0) { idFound = sIter->first; break; } } _trashesMutex.unlock(); // then look in the database if(idFound == 0) { _dbSafeAccessMutex.lock(); this->getNodeIdByLabelQuery(label, id); _dbSafeAccessMutex.unlock(); } else { id = idFound; } } else { UWARN("Can't search with an empty label!"); } }
bool CameraImages::init() { UDEBUG(""); if(_dir) { _dir->setPath(_path, "jpg ppm png bmp pnm"); } else { _dir = new UDirectory(_path, "jpg ppm png bmp pnm"); } _count = 0; if(_path[_path.size()-1] != '\\' && _path[_path.size()-1] != '/') { _path.append("/"); } if(!_dir->isValid()) { ULOGGER_ERROR("Directory path is not valid \"%s\"", _path.c_str()); } else if(_dir->getFileNames().size() == 0) { UWARN("Directory is empty \"%s\"", _path.c_str()); } return _dir->isValid(); }
void UEventsManager::_removePipe( const UEventsSender * sender, const UEventsHandler * receiver, const std::string & eventName) { pipesMutex_.lock(); bool removed = false; for(std::list<Pipe>::iterator iter=pipes_.begin(); iter!= pipes_.end();) { if(iter->sender_ == sender && iter->receiver_ == receiver && iter->eventName_.compare(eventName) == 0) { iter = pipes_.erase(iter); removed = true; } else { ++iter; } } if(!removed) { UWARN("Pipe between sender %p and receiver %p with event %s didn't exist.", sender, receiver, eventName.c_str()); } pipesMutex_.unlock(); }
bool DBReader::init(int startIndex) { if(_dbDriver) { _dbDriver->closeConnection(); delete _dbDriver; _dbDriver = 0; } _ids.clear(); _currentId=_ids.end(); _previousStamp = 0; _previousMapID = 0; if(_paths.size() == 0) { UERROR("No database path set..."); return false; } std::string path = _paths.front(); if(!UFile::exists(path)) { UERROR("Database path does not exist (%s)", path.c_str()); return false; } rtabmap::ParametersMap parameters; parameters.insert(rtabmap::ParametersPair(rtabmap::Parameters::kDbSqlite3InMemory(), "false")); _dbDriver = new DBDriverSqlite3(parameters); if(!_dbDriver) { UERROR("Driver doesn't exist."); return false; } if(!_dbDriver->openConnection(path)) { UERROR("Can't open database %s", path.c_str()); delete _dbDriver; _dbDriver = 0; return false; } _dbDriver->getAllNodeIds(_ids); _currentId = _ids.begin(); if(startIndex>0 && _ids.size()) { std::set<int>::iterator iter = uIteratorAt(_ids, startIndex); if(iter == _ids.end()) { UWARN("Start index is too high (%d), the last in database is %d. Starting from beginning...", startIndex, _ids.size()-1); } else { _currentId = iter; } } return true; }
// Replaces the raw user data of this link. Overwriting existing data is
// allowed but logged, so possible data loss is visible.
void Link::setUserDataRaw(const cv::Mat & userDataRaw)
{
	const bool overwriting = !_userDataRaw.empty();
	if(overwriting)
	{
		UWARN("Writing new user data over existing user data. This may result in data loss.");
	}
	_userDataRaw = userDataRaw;
}
// Thread loop (legacy version): grabs a frame, applies optional
// post-processing (color-only, mirroring, stereo-to-depth) and publishes
// it as a CameraEvent; stops the thread when frames run out.
void CameraThread::mainLoop()
{
	UTimer timer;
	UDEBUG("");
	SensorData data = _camera->takeImage();

	if(!data.imageRaw().empty())
	{
		// Drop the depth/right image when only color output is requested.
		if(_colorOnly && !data.depthRaw().empty())
		{
			data.setDepthOrRightRaw(cv::Mat());
		}
		// Horizontal mirroring (single-camera only): flip the images and
		// mirror cx so the camera model stays consistent with the flip.
		if(_mirroring && data.cameraModels().size() == 1)
		{
			cv::Mat tmpRgb;
			cv::flip(data.imageRaw(), tmpRgb, 1);
			data.setImageRaw(tmpRgb);
			if(data.cameraModels()[0].cx())
			{
				CameraModel tmpModel(
						data.cameraModels()[0].fx(),
						data.cameraModels()[0].fy(),
						float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
						data.cameraModels()[0].cy(),
						data.cameraModels()[0].localTransform());
				data.setCameraModel(tmpModel);
			}
			if(!data.depthRaw().empty())
			{
				cv::Mat tmpDepth;
				cv::flip(data.depthRaw(), tmpDepth, 1);
				data.setDepthOrRightRaw(tmpDepth);
			}
		}
		// Convert a stereo pair to RGB-D: depth from disparity replaces the
		// right image, and the left model replaces the stereo model.
		if(_stereoToDepth && data.stereoCameraModel().isValid() && !data.rightRaw().empty())
		{
			cv::Mat depth = util2d::depthFromDisparity(
					util2d::disparityFromStereoImages(data.imageRaw(), data.rightRaw()),
					data.stereoCameraModel().left().fx(),
					data.stereoCameraModel().baseline());
			data.setCameraModel(data.stereoCameraModel().left());
			data.setDepthOrRightRaw(depth);
			data.setStereoCameraModel(StereoCameraModel());
		}

		this->post(new CameraEvent(data, _camera->getSerial()));
	}
	else if(!this->isKilled())
	{
		// No more frames: stop the thread and notify listeners once.
		UWARN("no more images...");
		this->kill();
		this->post(new CameraEvent());
	}
}
// Logs a user-readable warning for socket errors we care about; the
// expected RemoteHostClosedError and all unhandled errors are silent.
void CameraTcpServer::displayError(QAbstractSocket::SocketError socketError)
{
	if(socketError == QAbstractSocket::RemoteHostClosedError)
	{
		// Normal disconnection: nothing to report.
		return;
	}
	if(socketError == QAbstractSocket::HostNotFoundError)
	{
		UWARN("CameraTcp: Tcp error: The host was not found. Please "
			"check the host name and port settings.\n");
	}
	else if(socketError == QAbstractSocket::ConnectionRefusedError)
	{
		UWARN("CameraTcp: The connection was refused by the peer. "
			"Make sure your images server is running, "
			"and check that the host name and port "
			"settings are correct.");
	}
	else
	{
		//UERROR("The following error occurred: %s.", this->errorString().toStdString().c_str());
	}
}
// Updates the stored value of a parameter and syncs the corresponding
// editor widget (looked up by object name == key), dispatching on the
// parameter's declared type. Keys in ignored groups are skipped.
void ParametersToolBox::updateParameter(const std::string & key, const std::string & value)
{
	// The group is the part of the key before the first "/".
	QString group = QString::fromStdString(key).split("/").first();
	if(!ignoredGroups_.contains(group))
	{
		if(parameters_.find(key) == parameters_.end())
		{
			UWARN("key=\"%s\" doesn't exist", key.c_str());
		}
		else
		{
			parameters_.at(key) = value;
			// Editor widgets were created with the parameter key as name.
			QWidget * widget = this->findChild<QWidget*>(key.c_str());
			QString type = QString::fromStdString(Parameters::getType(key));
			if(type.compare("string") == 0)
			{
				QString valueQt = QString::fromStdString(value);
				if(valueQt.contains(';'))
				{
					// It's a list, just change the index
					// NOTE(review): list values appear to be encoded as
					// "index:item1;item2;..." — ';' marks a list while the
					// selected index is the part before ':'. Confirm against
					// the code that serializes these values.
					QStringList splitted = valueQt.split(':');
					((QComboBox*)widget)->setCurrentIndex(splitted.first().toInt());
				}
				else
				{
					((QLineEdit*)widget)->setText(valueQt);
				}
			}
			else if(type.compare("int") == 0)
			{
				((QSpinBox*)widget)->setValue(uStr2Int(value));
			}
			else if(type.compare("uint") == 0)
			{
				((QSpinBox*)widget)->setValue(uStr2Int(value));
			}
			else if(type.compare("double") == 0)
			{
				((QDoubleSpinBox*)widget)->setValue(uStr2Double(value));
			}
			else if(type.compare("float") == 0)
			{
				((QDoubleSpinBox*)widget)->setValue(uStr2Float(value));
			}
			else if(type.compare("bool") == 0)
			{
				((QCheckBox*)widget)->setChecked(uStr2Bool(value));
			}
		}
	}
}
// Resets the odometry to the given initial pose and clears all derived
// state: velocity, ground truth, reset countdown, timestamps, distance,
// particle filters and (when enabled) the Kalman filter.
void Odometry::reset(const Transform & initialPose)
{
	previousVelocityTransform_.setNull();
	previousGroundTruthPose_.setNull();
	_resetCurrentCount = 0;
	previousStamp_ = 0;
	distanceTravelled_ = 0;
	if(_force3DoF || particleFilters_.size())
	{
		float x,y,z, roll,pitch,yaw;
		initialPose.getTranslationAndEulerAngles(x, y, z, roll, pitch, yaw);

		if(_force3DoF)
		{
			if(z != 0.0f || roll != 0.0f || pitch != 0.0f)
			{
				UWARN("Force2D=true and the initial pose contains z, roll or pitch values (%s). They are set to null.", initialPose.prettyPrint().c_str());
			}
			// Project the pose on the XY plane: keep x, y, yaw only.
			z = 0;
			roll = 0;
			pitch = 0;
			Transform pose(x, y, z, roll, pitch, yaw);
			_pose = pose;
		}
		else
		{
			_pose = initialPose;
		}

		if(particleFilters_.size())
		{
			// One particle filter per degree of freedom.
			UASSERT(particleFilters_.size() == 6);
			particleFilters_[0]->init(x);
			particleFilters_[1]->init(y);
			particleFilters_[2]->init(z);
			particleFilters_[3]->init(roll);
			particleFilters_[4]->init(pitch);
			particleFilters_[5]->init(yaw);
		}

		if(_filteringStrategy == 1)
		{
			initKalmanFilter(initialPose);
		}
	}
	else
	{
		_pose = initialPose;
	}
}
// Computes right-image correspondences for the given left-image corners
// using pyramidal Lucas-Kanade stereo optical flow, then invalidates
// matches whose disparity is outside [minDisparity, maxDisparity].
// `status` is set to 0 for every rejected correspondence.
std::vector<cv::Point2f> StereoOpticalFlow::computeCorrespondences(
	const cv::Mat & leftImage,
	const cv::Mat & rightImage,
	const std::vector<cv::Point2f> & leftCorners,
	std::vector<unsigned char> & status) const
{
	std::vector<cv::Point2f> rightCorners;
	UDEBUG("util2d::calcOpticalFlowPyrLKStereo() begin");
	std::vector<float> flowErrors;
	util2d::calcOpticalFlowPyrLKStereo(
		leftImage,
		rightImage,
		leftCorners,
		rightCorners,
		status,
		flowErrors,
		this->winSize(),
		this->maxLevel(),
		cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, this->iterations(), epsilon_),
		cv::OPTFLOW_LK_GET_MIN_EIGENVALS, 1e-4);
	UDEBUG("util2d::calcOpticalFlowPyrLKStereo() end");
	UASSERT(leftCorners.size() == rightCorners.size() && status.size() == leftCorners.size());

	// Reject correspondences with an out-of-range disparity.
	const float minDisp = float(this->minDisparity());
	const float maxDisp = float(this->maxDisparity());
	int countFlowRejected = 0;
	int countDisparityRejected = 0;
	for(unsigned int i = 0; i < status.size(); ++i)
	{
		if(status[i] == 0)
		{
			++countFlowRejected;
			continue;
		}
		const float disparity = leftCorners[i].x - rightCorners[i].x;
		if(disparity < minDisp || disparity > maxDisp)
		{
			status[i] = 0;
			++countDisparityRejected;
		}
	}
	UDEBUG("total=%d countFlowRejected=%d countDisparityRejected=%d",
		(int)status.size(), countFlowRejected, countDisparityRejected);

	if(countFlowRejected + countDisparityRejected > (int)status.size()/2)
	{
		UWARN("A large number (%d/%d) of stereo correspondences are rejected! Optical flow may have failed, images are not calibrated or the background is too far (no disparity between the images).",
			countFlowRejected+countDisparityRejected,
			(int)status.size());
	}

	return rightCorners;
}
void ImageView::setFeatureColor(int id, const QColor & color) { QList<KeypointItem*> items = _features.values(id); if(items.size()) { for(int i=0; i<items.size(); ++i) { items[i]->setColor(color); } } else { UWARN("Not found feature %d", id); } }
// Captures one frame, throttling to the configured image rate. Warns when
// the camera cannot keep up with the requested rate. Fills `info` (id,
// stamp, capture time) when provided.
SensorData Camera::takeImage(CameraInfo * info)
{
	bool warnFrameRateTooHigh = false;
	float actualFrameRate = 0;
	if(_imageRate>0)
	{
		// Sleep most of the remaining frame period (minus 2 ms of margin).
		int sleepTime = (1000.0f/_imageRate - 1000.0f*_frameRateTimer->getElapsedTime());
		if(sleepTime > 2)
		{
			uSleep(sleepTime-2);
		}
		else if(sleepTime < 0)
		{
			// Already late: remember it so we warn after the capture.
			warnFrameRateTooHigh = true;
			actualFrameRate = 1.0/(_frameRateTimer->getElapsedTime());
		}

		// Add precision at the cost of a small overhead
		// (busy-wait the last ~2 ms of the frame period).
		while(_frameRateTimer->getElapsedTime() < 1.0/double(_imageRate)-0.000001)
		{
			//
		}

		double slept = _frameRateTimer->getElapsedTime();
		_frameRateTimer->start();
		UDEBUG("slept=%fs vs target=%fs", slept, 1.0/double(_imageRate));
	}

	UTimer timer;
	SensorData data = this->captureImage(info);
	double captureTime = timer.ticks();
	if(warnFrameRateTooHigh)
	{
		UWARN("Camera: Cannot reach target image rate %f Hz, current rate is %f Hz and capture time = %f s.",
				_imageRate, actualFrameRate, captureTime);
	}
	else
	{
		UDEBUG("Time capturing image = %fs", captureTime);
	}
	if(info)
	{
		info->id = data.id();
		info->stamp = data.stamp();
		info->timeCapture = captureTime;
	}
	return data;
}
// Stores raw user data on this sensor data object. Existing user data
// (raw or compressed) is never overwritten: a warning is emitted and the
// call is a no-op. On success the compressed copy is invalidated.
void SensorData::setUserDataRaw(const cv::Mat & userDataRaw)
{
	const bool hasExisting = !_userDataCompressed.empty() || !_userDataRaw.empty();
	if(!userDataRaw.empty() && hasExisting)
	{
		UWARN("Cannot write new user data (%d bytes) over existing user "
			"data (%d bytes, %d compressed). Set user data of %d to null "
			"before setting a new one.",
			int(userDataRaw.total()*userDataRaw.elemSize()),
			int(_userDataRaw.total()*_userDataRaw.elemSize()),
			_userDataCompressed.cols,
			this->id());
		return;
	}

	_userDataRaw = userDataRaw;
	_userDataCompressed = cv::Mat();
}
void UEventsManager::_createPipe( const UEventsSender * sender, const UEventsHandler * receiver, const std::string & eventName) { pipesMutex_.lock(); bool exist = false; for(std::list<Pipe>::iterator iter=pipes_.begin(); iter!= pipes_.end();++iter) { if(iter->sender_ == sender && iter->receiver_ == receiver && iter->eventName_.compare(eventName) == 0) { exist = true; break; } } if(!exist) { bool handlerFound = false; handlersMutex_.lock(); for(std::list<UEventsHandler*>::iterator iter=handlers_.begin(); iter!=handlers_.end(); ++iter) { if(*iter == receiver) { handlerFound = true; break; } } handlersMutex_.unlock(); if(handlerFound) { pipes_.push_back(Pipe(sender, receiver, eventName)); } else { UERROR("Cannot create the pipe because the receiver is not yet " "added to UEventsManager's handlers list."); } } else { UWARN("Pipe between sender %p and receiver %p with event %s was already created.", sender, receiver, eventName.c_str()); } pipesMutex_.unlock(); }
// Resets the odometry to the given initial pose. With _force2D, the pose
// is projected on the XY plane (x, y, yaw kept; z, roll, pitch dropped).
void Odometry::reset(const Transform & initialPose)
{
	_resetCurrentCount = 0;
	if(_force2D)
	{
		float x,y,z, roll,pitch,yaw;
		initialPose.getTranslationAndEulerAngles(x, y, z, roll, pitch, yaw);
		// BUG FIX: the warning condition previously tested yaw (which is
		// actually kept below) instead of pitch (which is discarded), so it
		// fired spuriously on yaw-only poses and missed pitch-only ones.
		// The newer Odometry::reset in this file tests pitch as done here.
		if(z != 0.0f || roll != 0.0f || pitch != 0.0f)
		{
			UWARN("Force2D=true and the initial pose contains z, roll or pitch values (%s). They are set to null.", initialPose.prettyPrint().c_str());
		}
		// Keep only the planar components of the pose.
		Transform pose(x, y, 0, 0, 0, yaw);
		_pose = pose;
	}
	else
	{
		_pose = initialPose;
	}
}
void CameraTcpServer::incomingConnection(int socketDescriptor) { QList<QTcpSocket*> clients = this->findChildren<QTcpSocket*>(); if(clients.size() >= 1) { UWARN("A client is already connected. Only one connection allowed at the same time."); QTcpSocket socket; socket.setSocketDescriptor(socketDescriptor); socket.close(); // close without sending an acknowledge } else { QTcpSocket * socket = new QTcpSocket(this); connect(socket, SIGNAL(readyRead()), this, SLOT(readReceivedData())); connect(socket, SIGNAL(error(QAbstractSocket::SocketError)), this, SLOT(displayError(QAbstractSocket::SocketError))); connect(socket, SIGNAL(disconnected()), this, SLOT(connectionLost())); socket->setSocketDescriptor(socketDescriptor); socket->write(QByteArray("1")); // send acknowledge } }
// Stores user data on this link, replacing any previous data (with a
// warning). A CV_8UC1 matrix is assumed to be already-compressed bytes;
// other types are kept raw with a compressed copy cached alongside.
void Link::setUserData(const cv::Mat & userData)
{
	const bool overwriting = !userData.empty() &&
		(!_userDataCompressed.empty() || !_userDataRaw.empty());
	if(overwriting)
	{
		UWARN("Writing new user data over existing user data. This may result in data loss.");
	}

	// Clear previous state, then store the new data (if any).
	_userDataRaw = cv::Mat();
	_userDataCompressed = cv::Mat();
	if(userData.empty())
	{
		return;
	}
	if(userData.type() == CV_8UC1) // Bytes
	{
		// Single-channel byte matrix: assume compressed already.
		_userDataCompressed = userData;
	}
	else
	{
		_userDataRaw = userData;
		_userDataCompressed = compressData2(userData);
	}
}
void CameraThread::mainLoop() { UTimer totalTime; UDEBUG(""); CameraInfo info; SensorData data = _camera->takeImage(&info); if(!data.imageRaw().empty() || (dynamic_cast<DBReader*>(_camera) != 0 && data.id()>0)) // intermediate nodes could not have image set { postUpdate(&data, &info); info.cameraName = _camera->getSerial(); info.timeTotal = totalTime.ticks(); this->post(new CameraEvent(data, info)); } else if(!this->isKilled()) { UWARN("no more images..."); this->kill(); this->post(new CameraEvent()); } }
/*********************************
 * RecordFichier function
 * Records a WAV file
 *********************************/
// Mic capture loop: copies newly recorded samples out of the FMOD ring
// buffer, optionally appends them to the open WAV file (_fp) and pushes
// them to the frames buffer. Stops when the max file size is reached.
void UAudioCaptureMic::mainLoop()
{
	if(!_sound)
	{
		UERROR("Recorder is not initialized.");
		this->kill();
		return;
	}
	FMOD_RESULT result;
	void *ptr1 = 0, *ptr2 = 0;
	int blockLength;
	unsigned int len1 = 0, len2 = 0;

	unsigned int recordPos = 0;

	// Current write position of the recording driver in the ring buffer.
	result = UAudioSystem::getRecordPosition(_driver, &recordPos);
	UASSERT_MSG(result==FMOD_OK, FMOD_ErrorString(result));
	if (recordPos != _lastRecordPos)
	{
		blockLength = (int)recordPos - (int)_lastRecordPos;
		if (blockLength < 0)
		{
			// The ring buffer wrapped around since last read.
			blockLength += _soundLength;
		}

		// * exinfo.numchannels * 2 = stereo 16bit. 1 sample = 4 bytes.
		// Lock the sound to get access to the raw data.
		// ptr1/ptr2 cover the two segments when the block wraps around.
		FMOD_Sound_Lock(_sound, _lastRecordPos * channels() * bytesPerSample(), blockLength * channels() * bytesPerSample(), &ptr1, &ptr2, &len1, &len2);
		{
			if (ptr1 && len1) // Write it to disk.
			{
				if(_fp)
				{
					//write to file
					_dataLength += fwrite(ptr1, 1, len1, _fp);
				}
				// push back in the frames buffer
				pushBackSamples(ptr1, len1);
			}

			if (ptr2 && len2) // Write it to disk.
			{
				if(_fp)
				{
					//write to file
					_dataLength += fwrite(ptr2, 1, len2, _fp);
				}
				// push back in the frames buffer
				pushBackSamples(ptr2, len2);
			}
		}
		//Unlock the sound to allow FMOD to use it again.
		FMOD_Sound_Unlock(_sound, ptr1, ptr2, len1, len2);

		_lastRecordPos = recordPos;
	}

	UAudioSystem::update();
	uSleep(10);

	// If we are recording to a file, make sure to stop
	// when the maximum file size is reached
	if (_fp && _maxFileSize != 0 && int(_dataLength) + frameLength()*bytesPerSample() > _maxFileSize)
	{
		UWARN("Recording max memory reached (%d Mb)... stopped", _maxFileSize/1000000);
		this->kill();
	}
}
// return not null transform if odometry is correctly computed
// Dense Visual Odometry (DVO) backend: matches the current RGB-D frame
// against the current keyframe and returns the incremental transform in
// the base frame. Requires a single valid RGB-D camera model and a depth
// image registered to (same size as) the intensity image.
Transform OdometryDVO::computeTransform(
		SensorData & data,
		const Transform & guess,
		OdometryInfo * info)
{
	Transform t;
#ifdef RTABMAP_DVO
	UTimer timer;

	// DVO is dense: it needs a depth image of the same size as the image.
	if(data.imageRaw().empty() ||
		data.imageRaw().rows != data.depthOrRightRaw().rows ||
		data.imageRaw().cols != data.depthOrRightRaw().cols)
	{
		UERROR("Not supported input!");
		return t;
	}

	if(!(data.cameraModels().size() == 1 && data.cameraModels()[0].isValidForReprojection()))
	{
		UERROR("Invalid camera model! Only single RGB-D camera supported by DVO. Try another odometry approach.");
		return t;
	}

	// Lazily create the tracker with its default configuration.
	if(dvo_ == 0)
	{
		dvo::DenseTracker::Config cfg = dvo::DenseTracker::getDefaultConfig();
		dvo_ = new dvo::DenseTracker(cfg);
	}

	cv::Mat grey, grey_s16, depth_inpainted, depth_mask, depth_mono, depth_float;
	// Convert the intensity image to single-channel float, as DVO expects.
	if(data.imageRaw().type() != CV_32FC1)
	{
		if(data.imageRaw().type() == CV_8UC3)
		{
			cv::cvtColor(data.imageRaw(), grey, CV_BGR2GRAY);
		}
		else
		{
			grey = data.imageRaw();
		}

		grey.convertTo(grey_s16, CV_32F);
	}
	else
	{
		grey_s16 = data.imageRaw();
	}

	// make sure all zeros are NAN
	if(data.depthRaw().type() == CV_32FC1)
	{
		depth_float = data.depthRaw();
		for(int i=0; i<depth_float.rows; ++i)
		{
			for(int j=0; j<depth_float.cols; ++j)
			{
				float & d = depth_float.at<float>(i,j);
				if(d == 0.0f)
				{
					d = NAN;
				}
			}
		}
	}
	else if(data.depthRaw().type() == CV_16UC1)
	{
		// 16U millimeters -> 32F meters; invalid (0) pixels become NAN.
		depth_float = cv::Mat(data.depthRaw().size(), CV_32FC1);
		for(int i=0; i<data.depthRaw().rows; ++i)
		{
			for(int j=0; j<data.depthRaw().cols; ++j)
			{
				float d = float(data.depthRaw().at<unsigned short>(i,j))/1000.0f;
				depth_float.at<float>(i, j) = d==0.0f?NAN:d;
			}
		}
	}
	else
	{
		UFATAL("Unknown depth format!");
	}

	// Lazily create the camera pyramid from the first frame's intrinsics.
	if(camera_ == 0)
	{
		dvo::core::IntrinsicMatrix intrinsics = dvo::core::IntrinsicMatrix::create(
				data.cameraModels()[0].fx(),
				data.cameraModels()[0].fy(),
				data.cameraModels()[0].cx(),
				data.cameraModels()[0].cy());
		camera_ = new dvo::core::RgbdCameraPyramid(
				data.cameraModels()[0].imageWidth(),
				data.cameraModels()[0].imageHeight(),
				intrinsics);
	}

	dvo::core::RgbdImagePyramid * current = new dvo::core::RgbdImagePyramid(*camera_, grey_s16, depth_float);

	cv::Mat covariance;
	if(reference_ == 0)
	{
		// First frame (or first frame after being lost): becomes keyframe.
		reference_ = current;
		if(!lost_)
		{
			t.setIdentity();
		}
		covariance = cv::Mat::eye(6,6,CV_64FC1) * 9999.0;
	}
	else
	{
		dvo::DenseTracker::Result result;
		dvo_->match(*reference_, *current, result);
		t = Transform::fromEigen3d(result.Transformation);

		if(result.Information(0,0) > 0.0 && result.Information(0,0) != 1.0)
		{
			// Tracking succeeded: derive the covariance from the
			// information matrix returned by the tracker.
			lost_ = false;
			cv::Mat information = cv::Mat::eye(6,6, CV_64FC1);
			memcpy(information.data, result.Information.data(), 36*sizeof(double));
			covariance = information.inv();
			covariance *= 100.0; // to be in the same scale than loop closure detection

			Transform currentMotion = t;
			// Report the increment relative to the last reported pose,
			// not to the keyframe.
			t = motionFromKeyFrame_.inverse() * t;

			// TODO make parameters?
			if(currentMotion.getNorm() > 0.01 || currentMotion.getAngle() > 0.01)
			{
				if(info)
				{
					info->keyFrameAdded = true;
				}
				// new keyframe
				delete reference_;
				reference_ = current;
				motionFromKeyFrame_.setIdentity();
			}
			else
			{
				// Keep the keyframe; `current` is no longer needed.
				delete current;
				motionFromKeyFrame_ = currentMotion;
			}
		}
		else
		{
			// Tracking failed: drop everything and restart next frame.
			lost_ = true;
			delete reference_;
			delete current;
			reference_ = 0; // this will make restart from the next frame
			motionFromKeyFrame_.setIdentity();
			t.setNull();
			covariance = cv::Mat::eye(6,6,CV_64FC1) * 9999.0;
			UWARN("dvo failed to estimate motion, tracking will be reinitialized on next frame.");
		}
	}

	const Transform & localTransform = data.cameraModels()[0].localTransform();
	if(!t.isNull() && !t.isIdentity() && !localTransform.isIdentity() && !localTransform.isNull())
	{
		// from camera frame to base frame
		t = localTransform * t * localTransform.inverse();
	}

	if(info)
	{
		info->type = (int)kTypeDVO;
		info->covariance = covariance;
	}

	UINFO("Odom update time = %fs", timer.elapsed());

#else
	UERROR("RTAB-Map is not built with DVO support! Select another visual odometry approach.");
#endif
	return t;
}
// Updates the accumulated odometry pose from new sensor data and returns
// the new pose (null when lost). Applies optional image decimation, a
// motion-model or Kalman guess, Kalman/particle velocity filtering and
// holonomic/3DoF constraints, then integrates the increment into _pose.
Transform Odometry::process(SensorData & data, OdometryInfo * info)
{
	if(_pose.isNull())
	{
		_pose.setIdentity(); // initialized
	}

	UASSERT(!data.imageRaw().empty());

	// Either a valid stereo model or a valid first camera model is needed.
	if(!data.stereoCameraModel().isValidForProjection() &&
	   (data.cameraModels().size() == 0 || !data.cameraModels()[0].isValidForProjection()))
	{
		UERROR("Rectified images required! Calibrate your camera.");
		return Transform();
	}

	// Time elapsed since the previous frame; used to convert the increment
	// to a velocity and back.
	double dt = previousStamp_>0.0f?data.stamp() - previousStamp_:0.0;
	Transform guess = dt && guessFromMotion_?Transform::getIdentity():Transform();
	UASSERT_MSG(dt>0.0 || (dt == 0.0 && previousVelocityTransform_.isNull()), uFormat("dt=%f previous transform=%s", dt, previousVelocityTransform_.prettyPrint().c_str()).c_str());
	if(!previousVelocityTransform_.isNull())
	{
		if(guessFromMotion_)
		{
			if(_filteringStrategy == 1)
			{
				// use Kalman predict transform
				float vx,vy,vz, vroll,vpitch,vyaw;
				predictKalmanFilter(dt, &vx,&vy,&vz,&vroll,&vpitch,&vyaw);
				guess = Transform(vx*dt, vy*dt, vz*dt, vroll*dt, vpitch*dt, vyaw*dt);
			}
			else
			{
				// Constant-velocity model from the previous update.
				float vx,vy,vz, vroll,vpitch,vyaw;
				previousVelocityTransform_.getTranslationAndEulerAngles(vx,vy,vz, vroll,vpitch,vyaw);
				guess = Transform(vx*dt, vy*dt, vz*dt, vroll*dt, vpitch*dt, vyaw*dt);
			}
		}
		else if(_filteringStrategy == 1)
		{
			predictKalmanFilter(dt);
		}
	}

	UTimer time;
	Transform t;
	if(_imageDecimation > 1)
	{
		// Decimation of images with calibrations
		SensorData decimatedData = data;
		decimatedData.setImageRaw(util2d::decimate(decimatedData.imageRaw(), _imageDecimation));
		decimatedData.setDepthOrRightRaw(util2d::decimate(decimatedData.depthOrRightRaw(), _imageDecimation));
		std::vector<CameraModel> cameraModels = decimatedData.cameraModels();
		for(unsigned int i=0; i<cameraModels.size(); ++i)
		{
			cameraModels[i] = cameraModels[i].scaled(1.0/double(_imageDecimation));
		}
		decimatedData.setCameraModels(cameraModels);
		StereoCameraModel stereoModel = decimatedData.stereoCameraModel();
		if(stereoModel.isValidForProjection())
		{
			stereoModel.scale(1.0/double(_imageDecimation));
		}
		decimatedData.setStereoCameraModel(stereoModel);

		// compute transform
		t = this->computeTransform(decimatedData, guess, info);

		// transform back the keypoints in the original image
		std::vector<cv::KeyPoint> kpts = decimatedData.keypoints();
		double log2value = log(double(_imageDecimation))/log(2.0);
		for(unsigned int i=0; i<kpts.size(); ++i)
		{
			kpts[i].pt.x *= _imageDecimation;
			kpts[i].pt.y *= _imageDecimation;
			kpts[i].size *= _imageDecimation;
			kpts[i].octave += log2value;
		}
		data.setFeatures(kpts, decimatedData.descriptors());

		if(info)
		{
			// Scale the debug correspondences/words back to full resolution.
			UASSERT(info->newCorners.size() == info->refCorners.size());
			for(unsigned int i=0; i<info->newCorners.size(); ++i)
			{
				info->refCorners[i].x *= _imageDecimation;
				info->refCorners[i].y *= _imageDecimation;
				info->newCorners[i].x *= _imageDecimation;
				info->newCorners[i].y *= _imageDecimation;
			}
			for(std::multimap<int, cv::KeyPoint>::iterator iter=info->words.begin(); iter!=info->words.end(); ++iter)
			{
				iter->second.pt.x *= _imageDecimation;
				iter->second.pt.y *= _imageDecimation;
				iter->second.size *= _imageDecimation;
				iter->second.octave += log2value;
			}
		}
	}
	else
	{
		t = this->computeTransform(data, guess, info);
	}

	if(info)
	{
		info->timeEstimation = time.ticks();
		info->lost = t.isNull();
		info->stamp = data.stamp();
		info->interval = dt;
		info->transform = t;

		// Ground-truth increment for evaluation, when available.
		if(!data.groundTruth().isNull())
		{
			if(!previousGroundTruthPose_.isNull())
			{
				info->transformGroundTruth = previousGroundTruthPose_.inverse() * data.groundTruth();
			}
			previousGroundTruthPose_ = data.groundTruth();
		}
	}

	if(!t.isNull())
	{
		// Success: re-arm the automatic-reset countdown.
		_resetCurrentCount = _resetCountdown;

		float vx,vy,vz, vroll,vpitch,vyaw;
		t.getTranslationAndEulerAngles(vx,vy,vz, vroll,vpitch,vyaw);

		// transform to velocity
		if(dt)
		{
			vx /= dt;
			vy /= dt;
			vz /= dt;
			vroll /= dt;
			vpitch /= dt;
			vyaw /= dt;
		}

		if(_force3DoF || !_holonomic || particleFilters_.size() || _filteringStrategy==1)
		{
			if(_filteringStrategy == 1)
			{
				if(previousVelocityTransform_.isNull())
				{
					// reset Kalman
					if(dt)
					{
						initKalmanFilter(t, vx,vy,vz,vroll,vpitch,vyaw);
					}
					else
					{
						initKalmanFilter(t);
					}
				}
				else
				{
					// Kalman filtering
					updateKalmanFilter(vx,vy,vz,vroll,vpitch,vyaw);
				}
			}
			else if(particleFilters_.size())
			{
				// Particle filtering
				UASSERT(particleFilters_.size()==6);
				if(previousVelocityTransform_.isNull())
				{
					particleFilters_[0]->init(vx);
					particleFilters_[1]->init(vy);
					particleFilters_[2]->init(vz);
					particleFilters_[3]->init(vroll);
					particleFilters_[4]->init(vpitch);
					particleFilters_[5]->init(vyaw);
				}
				else
				{
					vx = particleFilters_[0]->filter(vx);
					vy = particleFilters_[1]->filter(vy);
					vyaw = particleFilters_[5]->filter(vyaw);

					if(!_holonomic)
					{
						// arc trajectory around ICR
						float tmpY = vyaw!=0.0f ? vx / tan((CV_PI-vyaw)/2.0f) : 0.0f;
						if(fabs(tmpY) < fabs(vy) || (tmpY<=0 && vy >=0) || (tmpY>=0 && vy<=0))
						{
							vy = tmpY;
						}
						else
						{
							vyaw = (atan(vx/vy)*2.0f-CV_PI)*-1;
						}
					}
					if(!_force3DoF)
					{
						vz = particleFilters_[2]->filter(vz);
						vroll = particleFilters_[3]->filter(vroll);
						vpitch = particleFilters_[4]->filter(vpitch);
					}
				}

				if(info)
				{
					info->timeParticleFiltering = time.ticks();
				}

				if(_force3DoF)
				{
					vz = 0.0f;
					vroll = 0.0f;
					vpitch = 0.0f;
				}
			}
			else if(!_holonomic)
			{
				// arc trajectory around ICR
				vy = vyaw!=0.0f ? vx / tan((CV_PI-vyaw)/2.0f) : 0.0f;
				if(_force3DoF)
				{
					vz = 0.0f;
					vroll = 0.0f;
					vpitch = 0.0f;
				}
			}

			// Rebuild the (filtered) transform from the velocity.
			if(dt)
			{
				t = Transform(vx*dt, vy*dt, vz*dt, vroll*dt, vpitch*dt, vyaw*dt);
			}
			else
			{
				t = Transform(vx, vy, vz, vroll, vpitch, vyaw);
			}

			if(info)
			{
				info->transformFiltered = t;
			}
		}

		previousStamp_ = data.stamp();
		previousVelocityTransform_.setNull();
		if(dt)
		{
			previousVelocityTransform_ = Transform(vx, vy, vz, vroll, vpitch, vyaw);
		}

		if(info)
		{
			distanceTravelled_ += t.getNorm();
			info->distanceTravelled = distanceTravelled_;
		}

		return _pose *= t; // updated
	}
	else if(_resetCurrentCount > 0)
	{
		// NOTE(review): this literal was wrapped across physical lines in
		// the extracted source; rejoined here into a single string.
		UWARN("Odometry lost! Odometry will be reset after next %d consecutive unsuccessful odometry updates...", _resetCurrentCount);

		--_resetCurrentCount;
		if(_resetCurrentCount == 0)
		{
			UWARN("Odometry automatically reset to latest pose!");
			this->reset(_pose);
		}
	}

	previousVelocityTransform_.setNull();
	previousStamp_ = 0;

	return Transform();
}
// Loads the next image from the directory. With _startAt == 0 it behaves
// as a "live" folder (always the lexically greatest file, only if newer
// than the last one loaded); otherwise it iterates the files in order,
// skipping up to _startAt. The result is resized to the configured size.
cv::Mat CameraImages::captureImage()
{
	UDEBUG("");
	cv::Mat img;
	if(_dir->isValid())
	{
		if(_refreshDir)
		{
			// Re-scan the directory to pick up files added since last call.
			_dir->update();
		}
		if(_startAt == 0)
		{
			const std::list<std::string> & fileNames = _dir->getFileNames();
			if(fileNames.size())
			{
				if(_lastFileName.empty() || uStrNumCmp(_lastFileName,*fileNames.rbegin()) < 0)
				{
					_lastFileName = *fileNames.rbegin();
					std::string fullPath = _path + _lastFileName;
					img = cv::imread(fullPath.c_str());
				}
			}
		}
		else
		{
			// Sequential mode: skip files until _startAt, then load the next.
			std::string fileName;
			std::string fullPath;
			fileName = _dir->getNextFileName();
			if(fileName.size())
			{
				fullPath = _path + fileName;
				while(++_count < _startAt && (fileName = _dir->getNextFileName()).size())
				{
					fullPath = _path + fileName;
				}
				if(fileName.size())
				{
					ULOGGER_DEBUG("Loading image : %s", fullPath.c_str());

#if CV_MAJOR_VERSION >=2 and CV_MINOR_VERSION >=4
					img = cv::imread(fullPath.c_str(), cv::IMREAD_UNCHANGED);
#else
					img = cv::imread(fullPath.c_str(), -1);
#endif
					UDEBUG("width=%d, height=%d, channels=%d, elementSize=%d, total=%d", img.cols, img.rows, img.channels(), img.elemSize(), img.total());

					// FIXME : it seems that some png are incorrectly loaded with opencv c++ interface, where c interface works...
					if(img.depth() != CV_8U)
					{
						// The depth should be 8U
						UWARN("Cannot read the image correctly, falling back to old OpenCV C interface...");
						IplImage * i = cvLoadImage(fullPath.c_str());
						img = cv::Mat(i, true);
						cvReleaseImage(&i);
					}
				}
			}
		}
	}
	else
	{
		UWARN("Directory is not set, camera must be initialized.");
	}

	// Resize to the requested output size, if one was configured.
	unsigned int w;
	unsigned int h;
	this->getImageSize(w, h);
	if(!img.empty() &&
	   w &&
	   h &&
	   w != (unsigned int)img.cols &&
	   h != (unsigned int)img.rows)
	{
		cv::Mat resampled;
		cv::resize(img, resampled, cv::Size(w, h));
		img = resampled;
	}

	return img;
}
// Captures one frame throttled to the image rate, converts it to 8-bit if
// needed, optionally extracts keypoints/descriptors into the output
// parameters, and drops the configured number of extra frames.
cv::Mat Camera::takeImage(cv::Mat & descriptors, std::vector<cv::KeyPoint> & keypoints)
{
	descriptors = cv::Mat();
	keypoints.clear();
	float imageRate = _imageRate==0.0f?33.0f:_imageRate; // limit to 33Hz if infinity
	if(imageRate>0)
	{
		// Sleep most of the remaining frame period (minus 2 ms of margin).
		int sleepTime = (1000.0f/imageRate - 1000.0f*_frameRateTimer->getElapsedTime());
		if(sleepTime > 2)
		{
			uSleep(sleepTime-2);
		}

		// Add precision at the cost of a small overhead
		// (busy-wait the last ~2 ms of the frame period).
		while(_frameRateTimer->getElapsedTime() < 1.0/double(imageRate)-0.000001)
		{
			//
		}

		double slept = _frameRateTimer->getElapsedTime();
		_frameRateTimer->start();
		UDEBUG("slept=%fs vs target=%fs", slept, 1.0/double(imageRate));
	}

	cv::Mat img;
	UTimer timer;
	img = this->captureImage();
	UDEBUG("Time capturing image = %fs", timer.ticks());

	if(!img.empty())
	{
		// The pipeline expects 8-bit images; convert if the camera gave more.
		if(img.depth() != CV_8U)
		{
			UWARN("Images should have already 8U depth !?");
			cv::Mat tmp = img;
			img = cv::Mat();
			tmp.convertTo(img, CV_8U);
			UDEBUG("Time converting image to 8U = %fs", timer.ticks());
		}

		// Optional feature extraction on the captured image.
		if(_featuresExtracted && _keypointDetector && _keypointDescriptor)
		{
			keypoints = _keypointDetector->generateKeypoints(img);
			descriptors = _keypointDescriptor->generateDescriptors(img, keypoints);
			UDEBUG("Post treatment time = %fs", timer.ticks());
		}

		// Discard the configured number of frames after the capture.
		if(_framesDropped)
		{
			unsigned int count = 0;
			while(count++ < _framesDropped)
			{
				cv::Mat tmp = this->captureImage();
				if(!tmp.empty())
				{
					UDEBUG("frame dropped (%d/%d)", (int)count, (int)_framesDropped);
				}
				else
				{
					break;
				}
			}
			UDEBUG("Frames dropped time = %fs", timer.ticks());
		}
	}
	return img;
}
bool CameraModel::load(const std::string & directory, const std::string & cameraName) { K_ = cv::Mat(); D_ = cv::Mat(); R_ = cv::Mat(); P_ = cv::Mat(); mapX_ = cv::Mat(); mapY_ = cv::Mat(); name_.clear(); imageSize_ = cv::Size(); std::string filePath = directory+"/"+cameraName+".yaml"; if(UFile::exists(filePath)) { try { UINFO("Reading calibration file \"%s\"", filePath.c_str()); cv::FileStorage fs(filePath, cv::FileStorage::READ); cv::FileNode n,n2; n = fs["camera_name"]; if(n.type() != cv::FileNode::NONE) { name_ = (int)n; } else { UWARN("Missing \"camera_name\" field in \"%s\"", filePath.c_str()); } n = fs["image_width"]; n2 = fs["image_height"]; if(n.type() != cv::FileNode::NONE) { imageSize_.width = (int)fs["image_width"]; imageSize_.height = (int)fs["image_height"]; } else { UWARN("Missing \"image_width\" and/or \"image_height\" fields in \"%s\"", filePath.c_str()); } // import from ROS calibration format n = fs["camera_matrix"]; if(n.type() != cv::FileNode::NONE) { int rows = (int)n["rows"]; int cols = (int)n["cols"]; std::vector<double> data; n["data"] >> data; UASSERT(rows*cols == (int)data.size()); UASSERT(rows == 3 && cols == 3); K_ = cv::Mat(rows, cols, CV_64FC1, data.data()).clone(); } else { UWARN("Missing \"camera_matrix\" field in \"%s\"", filePath.c_str()); } n = fs["distortion_coefficients"]; if(n.type() != cv::FileNode::NONE) { int rows = (int)n["rows"]; int cols = (int)n["cols"]; std::vector<double> data; n["data"] >> data; UASSERT(rows*cols == (int)data.size()); UASSERT(rows == 1 && (cols == 4 || cols == 5 || cols == 8)); D_ = cv::Mat(rows, cols, CV_64FC1, data.data()).clone(); } else {
void IMUThread::mainLoop() { UTimer totalTime; UDEBUG(""); if(rate_>0 || captureDelay_) { double delay = rate_>0?1000.0/double(rate_):1000.0f*captureDelay_; int sleepTime = delay - 1000.0f*frameRateTimer_.getElapsedTime(); if(sleepTime > 2) { uSleep(sleepTime-2); } // Add precision at the cost of a small overhead delay/=1000.0; while(frameRateTimer_.getElapsedTime() < delay-0.000001) { // } frameRateTimer_.start(); } captureDelay_ = 0.0; std::string line; if (std::getline(imuFile_, line)) { std::stringstream stream(line); std::string s; std::getline(stream, s, ','); std::string nanoseconds = s.substr(s.size() - 9, 9); std::string seconds = s.substr(0, s.size() - 9); cv::Vec3d gyr; for (int j = 0; j < 3; ++j) { std::getline(stream, s, ','); gyr[j] = uStr2Double(s); } cv::Vec3d acc; for (int j = 0; j < 3; ++j) { std::getline(stream, s, ','); acc[j] = uStr2Double(s); } double stamp = double(uStr2Int(seconds)) + double(uStr2Int(nanoseconds))*1e-9; if(previousStamp_>0 && stamp > previousStamp_) { captureDelay_ = stamp - previousStamp_; } previousStamp_ = stamp; IMU imu(gyr, cv::Mat(), acc, cv::Mat(), localTransform_); this->post(new IMUEvent(imu, stamp)); } else if(!this->isKilled()) { UWARN("no more imu data..."); this->kill(); this->post(new IMUEvent()); } }
void SensorData::uncompressDataConst(cv::Mat * imageRaw, cv::Mat * depthRaw, cv::Mat * laserScanRaw, cv::Mat * userDataRaw) const { if(imageRaw) { *imageRaw = _imageRaw; } if(depthRaw) { *depthRaw = _depthOrRightRaw; } if(laserScanRaw) { *laserScanRaw = _laserScanRaw; } if(userDataRaw) { *userDataRaw = _userDataRaw; } if( (imageRaw && imageRaw->empty()) || (depthRaw && depthRaw->empty()) || (laserScanRaw && laserScanRaw->empty()) || (userDataRaw && userDataRaw->empty())) { rtabmap::CompressionThread ctImage(_imageCompressed, true); rtabmap::CompressionThread ctDepth(_depthOrRightCompressed, true); rtabmap::CompressionThread ctLaserScan(_laserScanCompressed, false); rtabmap::CompressionThread ctUserData(_userDataCompressed, false); if(imageRaw && imageRaw->empty()) { ctImage.start(); } if(depthRaw && depthRaw->empty()) { ctDepth.start(); } if(laserScanRaw && laserScanRaw->empty()) { ctLaserScan.start(); } if(userDataRaw && userDataRaw->empty()) { ctUserData.start(); } ctImage.join(); ctDepth.join(); ctLaserScan.join(); ctUserData.join(); if(imageRaw && imageRaw->empty()) { *imageRaw = ctImage.getUncompressedData(); if(imageRaw->empty()) { UWARN("Requested raw image data, but the sensor data (%d) doesn't have image.", this->id()); } } if(depthRaw && depthRaw->empty()) { *depthRaw = ctDepth.getUncompressedData(); if(depthRaw->empty()) { UWARN("Requested depth/right image data, but the sensor data (%d) doesn't have depth/right image.", this->id()); } } if(laserScanRaw && laserScanRaw->empty()) { *laserScanRaw = ctLaserScan.getUncompressedData(); if(laserScanRaw->empty()) { UWARN("Requested laser scan data, but the sensor data (%d) doesn't have laser scan.", this->id()); } } if(userDataRaw && userDataRaw->empty()) { *userDataRaw = ctUserData.getUncompressedData(); if(userDataRaw->empty()) { UWARN("Requested user data, but the sensor data (%d) doesn't have user data.", this->id()); } } } }
// Normalizes, in place, one line of the float prediction matrix so that the
// adjusted probabilities sum to ~1: tops up the diagonal with leftover
// loop-closure mass, fills zero entries with a uniform "all other places"
// value, rescales, then re-inserts the virtual-place probability.
// [in,out] prediction: prediction matrix, accessed as raw float data.
// [in] index: which line to normalize (must be < rows and < cols).
// [in] addedProbabilitiesSum: mass already placed on this line by the caller.
// [in] virtualPlaceUsed: when true, entry 0 is the virtual place and is set
//      to _predictionLC[0] at the end (and excluded from rescaling).
// NOTE(review): the offset pattern (index + j*cols) walks COLUMN 'index'
// across rows j in row-major layout, although the comments below say "row";
// confirm intended axis against the caller (works out the same if the matrix
// is filled symmetrically).
void BayesFilter::normalize(cv::Mat & prediction, unsigned int index, float addedProbabilitiesSum, bool virtualPlaceUsed) const
{
	UASSERT(index < (unsigned int)prediction.rows && index < (unsigned int)prediction.cols);
	int cols = prediction.cols;
	// ADD values of not found neighbors to loop closure
	if(addedProbabilitiesSum < _totalPredictionLCValues-_predictionLC[0])
	{
		float delta = _totalPredictionLCValues-_predictionLC[0]-addedProbabilitiesSum;
		// Offset index + index*cols is the diagonal element (index, index).
		((float*)prediction.data)[index + index*cols] += delta;
		addedProbabilitiesSum+=delta;
	}

	// Mass left over for places not covered by the loop-closure model.
	float allOtherPlacesValue = 0;
	if(_totalPredictionLCValues < 1)
	{
		allOtherPlacesValue = 1.0f - _totalPredictionLCValues;
	}

	// Set all loop events to small values according to the model
	if(allOtherPlacesValue > 0 && cols>1)
	{
		float value = allOtherPlacesValue / float(cols - 1);
		// Skip entry 0 (virtual place) when it is in use.
		for(int j=virtualPlaceUsed?1:0; j<cols; ++j)
		{
			if(((float*)prediction.data)[index + j*cols] == 0)
			{
				((float*)prediction.data)[index + j*cols] = value;
				addedProbabilitiesSum += ((float*)prediction.data)[index + j*cols];
			}
		}
	}

	//normalize this row
	float maxNorm = 1 - (virtualPlaceUsed?_predictionLC[0]:0); // 1 - virtual place probability
	// Rescale only if the sum is off target by more than the tolerance.
	if(addedProbabilitiesSum<maxNorm-0.0001 || addedProbabilitiesSum>maxNorm+0.0001)
	{
		for(int j=virtualPlaceUsed?1:0; j<cols; ++j)
		{
			((float*)prediction.data)[index + j*cols] *= maxNorm / addedProbabilitiesSum;
		}
		addedProbabilitiesSum = maxNorm;
	}

	// ADD virtual place prob
	if(virtualPlaceUsed)
	{
		((float*)prediction.data)[index] = _predictionLC[0];
		addedProbabilitiesSum += ((float*)prediction.data)[index];
	}

	//debug
	//for(int j=0; j<cols; ++j)
	//{
	//	ULOGGER_DEBUG("test col=%d = %f", i, prediction.data.fl[i + j*cols]);
	//}

	// Sanity check: the final sum should be ~1 after the steps above.
	if(addedProbabilitiesSum<0.99 || addedProbabilitiesSum > 1.01)
	{
		UWARN("Prediction is not normalized sum=%f", addedProbabilitiesSum);
	}
}