void CalibrationDialog::restart() { // restart savedCalibration_ = false; imagePoints_[0].clear(); imagePoints_[1].clear(); imageParams_[0].clear(); imageParams_[1].clear(); stereoImagePoints_[0].clear(); stereoImagePoints_[1].clear(); models_[0] = CameraModel(); models_[1] = CameraModel(); stereoModel_ = StereoCameraModel(); cameraName_.clear(); minIrs_[0] = 0x0000; maxIrs_[0] = 0x7fff; minIrs_[1] = 0x0000; maxIrs_[1] = 0x7fff; ui_->pushButton_calibrate->setEnabled(false); ui_->pushButton_save->setEnabled(false); ui_->radioButton_raw->setChecked(true); ui_->radioButton_rectified->setEnabled(false); ui_->radioButton_stereoRectified->setEnabled(false); ui_->progressBar_count->reset(); ui_->progressBar_count->setMaximum(COUNT_MIN); ui_->progressBar_x->reset(); ui_->progressBar_y->reset(); ui_->progressBar_size->reset(); ui_->progressBar_skew->reset(); ui_->progressBar_count_2->reset(); ui_->progressBar_count_2->setMaximum(COUNT_MIN); ui_->progressBar_x_2->reset(); ui_->progressBar_y_2->reset(); ui_->progressBar_size_2->reset(); ui_->progressBar_skew_2->reset(); ui_->label_serial->clear(); ui_->label_fx->setNum(0); ui_->label_fy->setNum(0); ui_->label_cx->setNum(0); ui_->label_cy->setNum(0); ui_->label_baseline->setNum(0); ui_->label_error->setNum(0); ui_->lineEdit_K->clear(); ui_->lineEdit_D->clear(); ui_->lineEdit_R->clear(); ui_->lineEdit_P->clear(); ui_->label_fx_2->setNum(0); ui_->label_fy_2->setNum(0); ui_->label_cx_2->setNum(0); ui_->label_cy_2->setNum(0); ui_->lineEdit_K_2->clear(); ui_->lineEdit_D_2->clear(); ui_->lineEdit_R_2->clear(); ui_->lineEdit_P_2->clear(); }
// Capture one image from the camera, apply the enabled pre-processing steps
// (color-only stripping, horizontal mirroring, stereo->depth conversion) and
// post the result as a CameraEvent. When the camera returns an empty image,
// the thread kills itself and posts an empty CameraEvent as an end marker.
void CameraThread::mainLoop()
{
	// NOTE(review): 'timer' is declared but never used in this version of
	// mainLoop() — confirm it can be removed.
	UTimer timer;
	UDEBUG("");
	SensorData data = _camera->takeImage();

	if(!data.imageRaw().empty())
	{
		// Drop the depth/right channel when only color output is requested.
		if(_colorOnly && !data.depthRaw().empty())
		{
			data.setDepthOrRightRaw(cv::Mat());
		}
		// Mirroring is only applied for a single-camera model.
		if(_mirroring && data.cameraModels().size() == 1)
		{
			cv::Mat tmpRgb;
			cv::flip(data.imageRaw(), tmpRgb, 1); // 1 = flip around vertical axis
			data.setImageRaw(tmpRgb);
			if(data.cameraModels()[0].cx())
			{
				// Mirror the principal point (cx) to match the flipped image;
				// fx/fy/cy are unchanged.
				CameraModel tmpModel(
						data.cameraModels()[0].fx(),
						data.cameraModels()[0].fy(),
						float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
						data.cameraModels()[0].cy(),
						data.cameraModels()[0].localTransform());
				data.setCameraModel(tmpModel);
			}
			// Flip the depth image too so it stays registered with the color image.
			if(!data.depthRaw().empty())
			{
				cv::Mat tmpDepth;
				cv::flip(data.depthRaw(), tmpDepth, 1);
				data.setDepthOrRightRaw(tmpDepth);
			}
		}
		// Convert a stereo pair into a depth image: disparity from the
		// left/right pair, then depth from disparity using fx and baseline.
		// The stereo model is replaced by the left mono model afterwards.
		if(_stereoToDepth && data.stereoCameraModel().isValid() && !data.rightRaw().empty())
		{
			cv::Mat depth = util2d::depthFromDisparity(
					util2d::disparityFromStereoImages(data.imageRaw(), data.rightRaw()),
					data.stereoCameraModel().left().fx(),
					data.stereoCameraModel().baseline());
			data.setCameraModel(data.stereoCameraModel().left());
			data.setDepthOrRightRaw(depth);
			data.setStereoCameraModel(StereoCameraModel());
		}
		this->post(new CameraEvent(data, _camera->getSerial()));
	}
	else if(!this->isKilled())
	{
		// Empty image => end of stream: stop the thread and signal listeners.
		UWARN("no more images...");
		this->kill();
		this->post(new CameraEvent());
	}
}
void CalibrationDialog::calibrate() { processingData_ = true; savedCalibration_ = false; QMessageBox mb(QMessageBox::Information, tr("Calibrating..."), tr("Operation in progress...")); mb.show(); QApplication::processEvents(); uSleep(100); // hack make sure the text in the QMessageBox is shown... QApplication::processEvents(); std::vector<std::vector<cv::Point3f> > objectPoints(1); cv::Size boardSize(ui_->spinBox_boardWidth->value(), ui_->spinBox_boardHeight->value()); float squareSize = ui_->doubleSpinBox_squareSize->value(); // compute board corner positions for( int i = 0; i < boardSize.height; ++i ) for( int j = 0; j < boardSize.width; ++j ) objectPoints[0].push_back(cv::Point3f(float( j*squareSize ), float( i*squareSize ), 0)); for(int id=0; id<(stereo_?2:1); ++id) { UINFO("Calibrating camera %d (samples=%d)", id, (int)imagePoints_[id].size()); objectPoints.resize(imagePoints_[id].size(), objectPoints[0]); //calibrate std::vector<cv::Mat> rvecs, tvecs; std::vector<float> reprojErrs; cv::Mat K, D; K = cv::Mat::eye(3,3,CV_64FC1); UINFO("calibrate!"); //Find intrinsic and extrinsic camera parameters double rms = cv::calibrateCamera(objectPoints, imagePoints_[id], imageSize_[id], K, D, rvecs, tvecs); UINFO("Re-projection error reported by calibrateCamera: %f", rms); // compute reprojection errors std::vector<cv::Point2f> imagePoints2; int i, totalPoints = 0; double totalErr = 0, err; reprojErrs.resize(objectPoints.size()); for( i = 0; i < (int)objectPoints.size(); ++i ) { cv::projectPoints( cv::Mat(objectPoints[i]), rvecs[i], tvecs[i], K, D, imagePoints2); err = cv::norm(cv::Mat(imagePoints_[id][i]), cv::Mat(imagePoints2), CV_L2); int n = (int)objectPoints[i].size(); reprojErrs[i] = (float) std::sqrt(err*err/n); totalErr += err*err; totalPoints += n; } double totalAvgErr = std::sqrt(totalErr/totalPoints); UINFO("avg re projection error = %f", totalAvgErr); cv::Mat P(3,4,CV_64FC1); P.at<double>(2,3) = 1; K.copyTo(P.colRange(0,3).rowRange(0,3)); std::cout << 
"cameraMatrix = " << K << std::endl; std::cout << "distCoeffs = " << D << std::endl; std::cout << "width = " << imageSize_[id].width << std::endl; std::cout << "height = " << imageSize_[id].height << std::endl; models_[id] = CameraModel(cameraName_.toStdString(), imageSize_[id], K, D, cv::Mat::eye(3,3,CV_64FC1), P); if(id == 0) { ui_->label_fx->setNum(models_[id].fx()); ui_->label_fy->setNum(models_[id].fy()); ui_->label_cx->setNum(models_[id].cx()); ui_->label_cy->setNum(models_[id].cy()); ui_->label_error->setNum(totalAvgErr); std::stringstream strK, strD, strR, strP; strK << models_[id].K(); strD << models_[id].D(); strR << models_[id].R(); strP << models_[id].P(); ui_->lineEdit_K->setText(strK.str().c_str()); ui_->lineEdit_D->setText(strD.str().c_str()); ui_->lineEdit_R->setText(strR.str().c_str()); ui_->lineEdit_P->setText(strP.str().c_str()); } else { ui_->label_fx_2->setNum(models_[id].fx()); ui_->label_fy_2->setNum(models_[id].fy()); ui_->label_cx_2->setNum(models_[id].cx()); ui_->label_cy_2->setNum(models_[id].cy()); ui_->label_error_2->setNum(totalAvgErr); std::stringstream strK, strD, strR, strP; strK << models_[id].K(); strD << models_[id].D(); strR << models_[id].R(); strP << models_[id].P(); ui_->lineEdit_K_2->setText(strK.str().c_str()); ui_->lineEdit_D_2->setText(strD.str().c_str()); ui_->lineEdit_R_2->setText(strR.str().c_str()); ui_->lineEdit_P_2->setText(strP.str().c_str()); } } if(stereo_ && models_[0].isValid() && models_[1].isValid()) { UINFO("stereo calibration (samples=%d)...", (int)stereoImagePoints_[0].size()); cv::Size imageSize = imageSize_[0].width > imageSize_[1].width?imageSize_[0]:imageSize_[1]; cv::Mat R, T, E, F; std::vector<std::vector<cv::Point3f> > objectPoints(1); cv::Size boardSize(ui_->spinBox_boardWidth->value(), ui_->spinBox_boardHeight->value()); float squareSize = ui_->doubleSpinBox_squareSize->value(); // compute board corner positions for( int i = 0; i < boardSize.height; ++i ) for( int j = 0; j < boardSize.width; ++j ) 
objectPoints[0].push_back(cv::Point3f(float( j*squareSize ), float( i*squareSize ), 0)); objectPoints.resize(stereoImagePoints_[0].size(), objectPoints[0]); // calibrate extrinsic #if CV_MAJOR_VERSION < 3 double rms = cv::stereoCalibrate( objectPoints, stereoImagePoints_[0], stereoImagePoints_[1], models_[0].K(), models_[0].D(), models_[1].K(), models_[1].D(), imageSize, R, T, E, F, cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, 1e-5), cv::CALIB_FIX_INTRINSIC); #else double rms = cv::stereoCalibrate( objectPoints, stereoImagePoints_[0], stereoImagePoints_[1], models_[0].K(), models_[0].D(), models_[1].K(), models_[1].D(), imageSize, R, T, E, F, cv::CALIB_FIX_INTRINSIC, cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, 1e-5)); #endif UINFO("stereo calibration... done with RMS error=%f", rms); double err = 0; int npoints = 0; std::vector<cv::Vec3f> lines[2]; UINFO("Computing avg re-projection error..."); for(unsigned int i = 0; i < stereoImagePoints_[0].size(); i++ ) { int npt = (int)stereoImagePoints_[0][i].size(); cv::Mat imgpt[2]; for(int k = 0; k < 2; k++ ) { imgpt[k] = cv::Mat(stereoImagePoints_[k][i]); cv::undistortPoints(imgpt[k], imgpt[k], models_[k].K(), models_[k].D(), cv::Mat(), models_[k].K()); computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]); } for(int j = 0; j < npt; j++ ) { double errij = fabs(stereoImagePoints_[0][i][j].x*lines[1][j][0] + stereoImagePoints_[0][i][j].y*lines[1][j][1] + lines[1][j][2]) + fabs(stereoImagePoints_[1][i][j].x*lines[0][j][0] + stereoImagePoints_[1][i][j].y*lines[0][j][1] + lines[0][j][2]); err += errij; } npoints += npt; } double totalAvgErr = err/(double)npoints; UINFO("stereo avg re projection error = %f", totalAvgErr); cv::Mat R1, R2, P1, P2, Q; cv::Rect validRoi[2]; cv::stereoRectify(models_[0].K(), models_[0].D(), models_[1].K(), models_[1].D(), imageSize, R, T, R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY, 0, imageSize, &validRoi[0], &validRoi[1]); UINFO("Valid ROI1 = 
%d,%d,%d,%d ROI2 = %d,%d,%d,%d newImageSize=%d/%d", validRoi[0].x, validRoi[0].y, validRoi[0].width, validRoi[0].height, validRoi[1].x, validRoi[1].y, validRoi[1].width, validRoi[1].height, imageSize.width, imageSize.height); if(imageSize_[0].width == imageSize_[1].width) { //Stereo, keep new extrinsic projection matrix stereoModel_ = StereoCameraModel( cameraName_.toStdString(), imageSize_[0], models_[0].K(), models_[0].D(), R1, P1, imageSize_[1], models_[1].K(), models_[1].D(), R2, P2, R, T, E, F); } else { //Kinect stereoModel_ = StereoCameraModel( cameraName_.toStdString(), imageSize_[0], models_[0].K(), models_[0].D(), cv::Mat::eye(3,3,CV_64FC1), models_[0].P(), imageSize_[1], models_[1].K(), models_[1].D(), cv::Mat::eye(3,3,CV_64FC1), models_[1].P(), R, T, E, F); } std::stringstream strR1, strP1, strR2, strP2; strR1 << stereoModel_.left().R(); strP1 << stereoModel_.left().P(); strR2 << stereoModel_.right().R(); strP2 << stereoModel_.right().P(); ui_->lineEdit_R->setText(strR1.str().c_str()); ui_->lineEdit_P->setText(strP1.str().c_str()); ui_->lineEdit_R_2->setText(strR2.str().c_str()); ui_->lineEdit_P_2->setText(strP2.str().c_str()); ui_->label_baseline->setNum(stereoModel_.baseline()); //ui_->label_error_stereo->setNum(totalAvgErr); } if(stereo_ && stereoModel_.left().isValid() && stereoModel_.right().isValid()&& (!ui_->label_baseline->isVisible() || stereoModel_.baseline() > 0.0)) { ui_->radioButton_rectified->setEnabled(true); ui_->radioButton_stereoRectified->setEnabled(true); ui_->radioButton_stereoRectified->setChecked(true); ui_->pushButton_save->setEnabled(true); } else if(models_[0].isValid()) { ui_->radioButton_rectified->setEnabled(true); ui_->radioButton_rectified->setChecked(true); ui_->pushButton_save->setEnabled(!stereo_); } UINFO("End calibration"); processingData_ = false; }
void CameraThread::mainLoop() { UDEBUG(""); CameraInfo info; SensorData data = _camera->takeImage(&info); if(!data.imageRaw().empty()) { if(_colorOnly && !data.depthRaw().empty()) { data.setDepthOrRightRaw(cv::Mat()); } if(_imageDecimation>1 && !data.imageRaw().empty()) { UDEBUG(""); UTimer timer; if(!data.depthRaw().empty() && !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0)) { UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! " "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows); } else { data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation)); data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation)); std::vector<CameraModel> models = data.cameraModels(); for(unsigned int i=0; i<models.size(); ++i) { if(models[i].isValidForProjection()) { models[i] = models[i].scaled(1.0/double(_imageDecimation)); } } data.setCameraModels(models); StereoCameraModel stereoModel = data.stereoCameraModel(); if(stereoModel.isValidForProjection()) { stereoModel.scale(1.0/double(_imageDecimation)); data.setStereoCameraModel(stereoModel); } } info.timeImageDecimation = timer.ticks(); } if(_mirroring && data.cameraModels().size() == 1) { UDEBUG(""); UTimer timer; cv::Mat tmpRgb; cv::flip(data.imageRaw(), tmpRgb, 1); data.setImageRaw(tmpRgb); UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(), "Only single RGBD cameras are supported for mirroring."); if(data.cameraModels().size() && data.cameraModels()[0].cx()) { CameraModel tmpModel( data.cameraModels()[0].fx(), data.cameraModels()[0].fy(), float(data.imageRaw().cols) - data.cameraModels()[0].cx(), data.cameraModels()[0].cy(), data.cameraModels()[0].localTransform()); data.setCameraModel(tmpModel); } if(!data.depthRaw().empty()) { cv::Mat tmpDepth; cv::flip(data.depthRaw(), tmpDepth, 1); data.setDepthOrRightRaw(tmpDepth); } 
info.timeMirroring = timer.ticks(); } if(_stereoToDepth && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty()) { UDEBUG(""); UTimer timer; cv::Mat depth = util2d::depthFromDisparity( _stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()), data.stereoCameraModel().left().fx(), data.stereoCameraModel().baseline()); data.setCameraModel(data.stereoCameraModel().left()); data.setDepthOrRightRaw(depth); data.setStereoCameraModel(StereoCameraModel()); info.timeDisparity = timer.ticks(); UDEBUG("Computing disparity = %f s", info.timeDisparity); } if(_scanFromDepth && data.cameraModels().size() && data.cameraModels().at(0).isValidForProjection() && !data.depthRaw().empty()) { UDEBUG(""); if(data.laserScanRaw().empty()) { UASSERT(_scanDecimation >= 1); UTimer timer; pcl::IndicesPtr validIndices(new std::vector<int>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData(data, _scanDecimation, _scanMaxDepth, _scanMinDepth, validIndices.get()); float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation); if(_scanVoxelSize>0.0f) { cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize); float ratio = float(cloud->size()) / float(validIndices->size()); maxPoints = ratio * maxPoints; } else if(!cloud->is_dense) { pcl::PointCloud<pcl::PointXYZ>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZ>); pcl::copyPointCloud(*cloud, *validIndices, *denseCloud); cloud = denseCloud; } cv::Mat scan; if(_scanNormalsK>0) { scan = util3d::laserScanFromPointCloud(*util3d::computeNormals(cloud, _scanNormalsK)); } else { scan = util3d::laserScanFromPointCloud(*cloud); } data.setLaserScanRaw(scan, (int)maxPoints, _scanMaxDepth); info.timeScanFromDepth = timer.ticks(); UDEBUG("Computing scan from depth = %f s", info.timeScanFromDepth); } else { UWARN("Option to create laser scan from depth image is enabled, but " "there is already a laser scan in the captured sensor data. 
Scan from " "depth will not be created."); } } info.cameraName = _camera->getSerial(); this->post(new CameraEvent(data, info)); } else if(!this->isKilled()) { UWARN("no more images..."); this->kill(); this->post(new CameraEvent()); } }
void CameraThread::postUpdate(SensorData * dataPtr, CameraInfo * info) const { UASSERT(dataPtr!=0); SensorData & data = *dataPtr; if(_colorOnly && !data.depthRaw().empty()) { data.setDepthOrRightRaw(cv::Mat()); } if(_distortionModel && !data.depthRaw().empty()) { UTimer timer; if(_distortionModel->getWidth() == data.depthRaw().cols && _distortionModel->getHeight() == data.depthRaw().rows ) { cv::Mat depth = data.depthRaw().clone();// make sure we are not modifying data in cached signatures. _distortionModel->undistort(depth); data.setDepthOrRightRaw(depth); } else { UERROR("Distortion model size is %dx%d but dpeth image is %dx%d!", _distortionModel->getWidth(), _distortionModel->getHeight(), data.depthRaw().cols, data.depthRaw().rows); } if(info) info->timeUndistortDepth = timer.ticks(); } if(_bilateralFiltering && !data.depthRaw().empty()) { UTimer timer; data.setDepthOrRightRaw(util2d::fastBilateralFiltering(data.depthRaw(), _bilateralSigmaS, _bilateralSigmaR)); if(info) info->timeBilateralFiltering = timer.ticks(); } if(_imageDecimation>1 && !data.imageRaw().empty()) { UDEBUG(""); UTimer timer; if(!data.depthRaw().empty() && !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0)) { UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! 
" "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows); } else { data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation)); data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation)); std::vector<CameraModel> models = data.cameraModels(); for(unsigned int i=0; i<models.size(); ++i) { if(models[i].isValidForProjection()) { models[i] = models[i].scaled(1.0/double(_imageDecimation)); } } data.setCameraModels(models); StereoCameraModel stereoModel = data.stereoCameraModel(); if(stereoModel.isValidForProjection()) { stereoModel.scale(1.0/double(_imageDecimation)); data.setStereoCameraModel(stereoModel); } } if(info) info->timeImageDecimation = timer.ticks(); } if(_mirroring && !data.imageRaw().empty() && data.cameraModels().size() == 1) { UDEBUG(""); UTimer timer; cv::Mat tmpRgb; cv::flip(data.imageRaw(), tmpRgb, 1); data.setImageRaw(tmpRgb); UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(), "Only single RGBD cameras are supported for mirroring."); if(data.cameraModels().size() && data.cameraModels()[0].cx()) { CameraModel tmpModel( data.cameraModels()[0].fx(), data.cameraModels()[0].fy(), float(data.imageRaw().cols) - data.cameraModels()[0].cx(), data.cameraModels()[0].cy(), data.cameraModels()[0].localTransform()); data.setCameraModel(tmpModel); } if(!data.depthRaw().empty()) { cv::Mat tmpDepth; cv::flip(data.depthRaw(), tmpDepth, 1); data.setDepthOrRightRaw(tmpDepth); } if(info) info->timeMirroring = timer.ticks(); } if(_stereoToDepth && !data.imageRaw().empty() && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty()) { UDEBUG(""); UTimer timer; cv::Mat depth = util2d::depthFromDisparity( _stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()), data.stereoCameraModel().left().fx(), data.stereoCameraModel().baseline()); // set Tx for stereo bundle adjustment (when used) CameraModel model = CameraModel( 
data.stereoCameraModel().left().fx(), data.stereoCameraModel().left().fy(), data.stereoCameraModel().left().cx(), data.stereoCameraModel().left().cy(), data.stereoCameraModel().localTransform(), -data.stereoCameraModel().baseline()*data.stereoCameraModel().left().fx()); data.setCameraModel(model); data.setDepthOrRightRaw(depth); data.setStereoCameraModel(StereoCameraModel()); if(info) info->timeDisparity = timer.ticks(); } if(_scanFromDepth && data.cameraModels().size() && data.cameraModels().at(0).isValidForProjection() && !data.depthRaw().empty()) { UDEBUG(""); if(data.laserScanRaw().empty()) { UASSERT(_scanDecimation >= 1); UTimer timer; pcl::IndicesPtr validIndices(new std::vector<int>); pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData( data, _scanDecimation, _scanMaxDepth, _scanMinDepth, validIndices.get()); float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation); cv::Mat scan; const Transform & baseToScan = data.cameraModels()[0].localTransform(); if(validIndices->size()) { if(_scanVoxelSize>0.0f) { cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize); float ratio = float(cloud->size()) / float(validIndices->size()); maxPoints = ratio * maxPoints; } else if(!cloud->is_dense) { pcl::PointCloud<pcl::PointXYZ>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZ>); pcl::copyPointCloud(*cloud, *validIndices, *denseCloud); cloud = denseCloud; } if(cloud->size()) { if(_scanNormalsK>0) { Eigen::Vector3f viewPoint(baseToScan.x(), baseToScan.y(), baseToScan.z()); pcl::PointCloud<pcl::Normal>::Ptr normals = util3d::computeNormals(cloud, _scanNormalsK, viewPoint); pcl::PointCloud<pcl::PointNormal>::Ptr cloudNormals(new pcl::PointCloud<pcl::PointNormal>); pcl::concatenateFields(*cloud, *normals, *cloudNormals); scan = util3d::laserScanFromPointCloud(*cloudNormals, baseToScan.inverse()); } else { scan = util3d::laserScanFromPointCloud(*cloud, baseToScan.inverse()); } } } data.setLaserScanRaw(scan, 
LaserScanInfo((int)maxPoints, _scanMaxDepth, baseToScan)); if(info) info->timeScanFromDepth = timer.ticks(); } else { UWARN("Option to create laser scan from depth image is enabled, but " "there is already a laser scan in the captured sensor data. Scan from " "depth will not be created."); } } }