void DataRecorder::showImage(const cv::Mat & image, const cv::Mat & depth)
{
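	// Mark the widget as busy while the image, depth and label are refreshed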
	processingImages_ = true;
	imageView_->setImage(uCvMat2QImage(image));
	imageView_->setImageDepth(uCvMat2QImage(depth));
	label_->setText(tr("Images=%1 (~%2 MB)").arg(count_).arg(totalSizeKB_/1000));
	processingImages_ = false;
}
Example #2
void PdfPlotItem::showDescription(bool shown)
{
	if(!_text)
	{
		_text = new QGraphicsTextItem(this);
		_text->setVisible(false);
	}
	if(shown)
	{
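		// Lazily create a 128-pixel-wide thumbnail from the signature's compressed image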
		if(!_img && _signaturesRef)
		{
			QImage img;
			QMap<int, Signature>::const_iterator iter = _signaturesRef->find(int(this->data().x()));
			if(iter != _signaturesRef->constEnd() && !iter.value().sensorData().imageCompressed().empty())
			{
				cv::Mat image;
				iter.value().sensorData().uncompressDataConst(&image, 0, 0);
				if(!image.empty())
				{
					img = uCvMat2QImage(image);
					QPixmap scaled = QPixmap::fromImage(img).scaledToWidth(128);
					_img = new QGraphicsPixmapItem(scaled, this);
					_img->setVisible(false);
				}
			}
		}

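		// Place the description text to the right of the thumbnail when one is shown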
		if(_img)
			_text->setPos(this->mapFromScene(4+150,0));
		else
			_text->setPos(this->mapFromScene(4,0));
		if(_childCount >= 0)
		{
			_text->setPlainText(QString("ID = %1\nValue = %2\nWeight = %3").arg(this->data().x()).arg(this->data().y()).arg(_childCount));
		}
		else
		{
			_text->setPlainText(QString("ID = %1\nValue = %2").arg(this->data().x()).arg(this->data().y()));
		}
		_text->setVisible(true);
		if(_img)
		{
			_img->setPos(this->mapFromScene(4,0));
			_img->setVisible(true);
		}
	}
	else
	{
		_text->setVisible(false);
		if(_img)
			_img->setVisible(false);
	}
	UPlotItem::showDescription(shown);
}
void PdfPlotItem::showDescription(bool shown)
{
	if(!_text)
	{
		_text = new QGraphicsTextItem(this);
		_text->setVisible(false);
	}
	if(shown)
	{
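		// Lazily create a 128-pixel-wide thumbnail from the compressed image stored for this ID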
		if(!_img && _imagesRef)
		{
			QImage img;
			QMap<int, std::vector<unsigned char> >::const_iterator iter = _imagesRef->find(int(this->data().x()));
			if(iter != _imagesRef->constEnd())
			{
				img = uCvMat2QImage(util3d::uncompressImage(iter.value()));
				QPixmap scaled = QPixmap::fromImage(img).scaledToWidth(128);
				_img = new QGraphicsPixmapItem(scaled, this);
				_img->setVisible(false);
			}
		}

		if(_img)
			_text->setPos(this->mapFromScene(4+150,0));
		else
			_text->setPos(this->mapFromScene(4,0));
		if(_childCount >= 0)
		{
			_text->setPlainText(QString("ID = %1\nValue = %2\nWeight = %3").arg(this->data().x()).arg(this->data().y()).arg(_childCount));
		}
		else
		{
			_text->setPlainText(QString("ID = %1\nValue = %2").arg(this->data().x()).arg(this->data().y()));
		}
		_text->setVisible(true);
		if(_img)
		{
			_img->setPos(this->mapFromScene(4,0));
			_img->setVisible(true);
		}
	}
	else
	{
		_text->setVisible(false);
		if(_img)
			_img->setVisible(false);
	}
	UPlotItem::showDescription(shown);
}
void CalibrationDialog::processImages(const cv::Mat & imageLeft, const cv::Mat & imageRight, const QString & cameraName)
{
	processingData_ = true;
	if(cameraName_.isEmpty())
	{
		cameraName_ = "0000";
		if(!cameraName.isEmpty())
		{
			cameraName_ = cameraName;
		}
	}
	if(ui_->label_serial->text().isEmpty())
	{
		ui_->label_serial->setText(cameraName_);

	}
	std::vector<cv::Mat> inputRawImages(2);
	if(ui_->checkBox_switchImages->isChecked())
	{
		inputRawImages[0] = imageRight;
		inputRawImages[1] = imageLeft;
	}
	else
	{
		inputRawImages[0] = imageLeft;
		inputRawImages[1] = imageRight;
	}

	std::vector<cv::Mat> images(2);
	images[0] = inputRawImages[0];
	images[1] = inputRawImages[1];
	imageSize_[0] = images[0].size();
	imageSize_[1] = images[1].size();

	bool boardFound[2] = {false};
	bool boardAccepted[2] = {false};
	bool readyToCalibrate[2] = {false};

	std::vector<std::vector<cv::Point2f> > pointBuf(2);

	bool depthDetected = false;
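	// Detect the calibration board in each image (left only for mono, left and right for stereo)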
	for(int id=0; id<(stereo_?2:1); ++id)
	{
		cv::Mat viewGray;
		if(!images[id].empty())
		{
			if(images[id].type() == CV_16UC1)
			{
				depthDetected = true;
				//assume IR image: convert to grayscale
				const float factor = 255.0f / float((maxIrs_[id] - minIrs_[id]));
				viewGray = cv::Mat(images[id].rows, images[id].cols, CV_8UC1);
				for(int i=0; i<images[id].rows; ++i)
				{
					for(int j=0; j<images[id].cols; ++j)
					{
						viewGray.at<unsigned char>(i, j) = (unsigned char)std::min(float(std::max(images[id].at<unsigned short>(i,j) - minIrs_[id], 0)) * factor, 255.0f);
					}
				}
				cvtColor(viewGray, images[id], cv::COLOR_GRAY2BGR); // convert to show detected points in color
			}
			else if(images[id].channels() == 3)
			{
				cvtColor(images[id], viewGray, cv::COLOR_BGR2GRAY);
			}
			else
			{
				viewGray = images[id];
				cvtColor(viewGray, images[id], cv::COLOR_GRAY2BGR); // convert to show detected points in color
			}
		}
		else
		{
			UERROR("Image %d is empty!! Should not!", id);
		}

		minIrs_[id] = 0;
		maxIrs_[id] = 0x7FFF;

		//Do it only if not yet calibrated
		if(!ui_->pushButton_save->isEnabled())
		{
			cv::Size boardSize(ui_->spinBox_boardWidth->value(), ui_->spinBox_boardHeight->value());
			if(!viewGray.empty())
			{
				int flags = CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE;

				if(!viewGray.empty())
				{
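					// Upscale small images (width < 640) by 2x to ease chessboard detection,
					// then scale the detected corners back to the original resolution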
					int maxScale = viewGray.cols < 640?2:1;
					for( int scale = 1; scale <= maxScale; scale++ )
					{
						cv::Mat timg;
						if( scale == 1 )
							timg = viewGray;
						else
							cv::resize(viewGray, timg, cv::Size(), scale, scale, CV_INTER_CUBIC);
						boardFound[id] = cv::findChessboardCorners(timg, boardSize, pointBuf[id], flags);
						if(boardFound[id])
						{
							if( scale > 1 )
							{
								cv::Mat cornersMat(pointBuf[id]);
								cornersMat *= 1./scale;
							}
							break;
						}
					}
				}
			}

			if(boardFound[id]) // If done with success,
			{
				// improve the found corners' coordinate accuracy for chessboard
				float minSquareDistance = -1.0f;
				for(unsigned int i=0; i<pointBuf[id].size()-1; ++i)
				{
					float d = cv::norm(pointBuf[id][i] - pointBuf[id][i+1]);
					if(minSquareDistance == -1.0f || minSquareDistance > d)
					{
						minSquareDistance = d;
					}
				}
				float radius = minSquareDistance/2.0f +0.5f;
				cv::cornerSubPix( viewGray, pointBuf[id], cv::Size(radius, radius), cv::Size(-1,-1),
						cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 ));

				// Draw the corners.
				cv::drawChessboardCorners(images[id], boardSize, cv::Mat(pointBuf[id]), boardFound[id]);

				std::vector<float> params(4,0);
				getParams(pointBuf[id], boardSize, imageSize_[id], params[0], params[1], params[2], params[3]);

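				// Keep the sample only if its board pose (x, y, size, skew) differs enough from previous samples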
				bool addSample = true;
				for(unsigned int i=0; i<imageParams_[id].size(); ++i)
				{
					if(fabs(params[0] - imageParams_[id][i].at(0)) < 0.1 && // x
						fabs(params[1] - imageParams_[id][i].at(1)) < 0.1 && // y
						fabs(params[2] - imageParams_[id][i].at(2)) < 0.05 && // size
						fabs(params[3] - imageParams_[id][i].at(3)) < 0.1) // skew
					{
						addSample = false;
					}
				}
				if(addSample)
				{
					boardAccepted[id] = true;

					imagePoints_[id].push_back(pointBuf[id]);
					imageParams_[id].push_back(params);

					UINFO("[%d] Added board, total=%d. (x=%f, y=%f, size=%f, skew=%f)", id, (int)imagePoints_[id].size(), params[0], params[1], params[2], params[3]);
				}

				// update statistics
				std::vector<float> xRange(2, imageParams_[id][0].at(0));
				std::vector<float> yRange(2, imageParams_[id][0].at(1));
				std::vector<float> sizeRange(2, imageParams_[id][0].at(2));
				std::vector<float> skewRange(2, imageParams_[id][0].at(3));
				for(unsigned int i=1; i<imageParams_[id].size(); ++i)
				{
					xRange[0] = imageParams_[id][i].at(0) < xRange[0] ? imageParams_[id][i].at(0) : xRange[0];
					xRange[1] = imageParams_[id][i].at(0) > xRange[1] ? imageParams_[id][i].at(0) : xRange[1];
					yRange[0] = imageParams_[id][i].at(1) < yRange[0] ? imageParams_[id][i].at(1) : yRange[0];
					yRange[1] = imageParams_[id][i].at(1) > yRange[1] ? imageParams_[id][i].at(1) : yRange[1];
					sizeRange[0] = imageParams_[id][i].at(2) < sizeRange[0] ? imageParams_[id][i].at(2) : sizeRange[0];
					sizeRange[1] = imageParams_[id][i].at(2) > sizeRange[1] ? imageParams_[id][i].at(2) : sizeRange[1];
					skewRange[0] = imageParams_[id][i].at(3) < skewRange[0] ? imageParams_[id][i].at(3) : skewRange[0];
					skewRange[1] = imageParams_[id][i].at(3) > skewRange[1] ? imageParams_[id][i].at(3) : skewRange[1];
				}
				//UINFO("Stats [%d]:", id);
				//UINFO("  Count = %d", (int)imagePoints_[id].size());
				//UINFO("  x =    [%f -> %f]", xRange[0], xRange[1]);
				//UINFO("  y =    [%f -> %f]", yRange[0], yRange[1]);
				//UINFO("  size = [%f -> %f]", sizeRange[0], sizeRange[1]);
				//UINFO("  skew = [%f -> %f]", skewRange[0], skewRange[1]);

				float xGood = xRange[1] - xRange[0];
				float yGood = yRange[1] - yRange[0];
				float sizeGood = sizeRange[1] - sizeRange[0];
				float skewGood = skewRange[1] - skewRange[0];

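				// Update the coverage progress bars for the corresponding camera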
				if(id == 0)
				{
					ui_->progressBar_x->setValue(xGood*100);
					ui_->progressBar_y->setValue(yGood*100);
					ui_->progressBar_size->setValue(sizeGood*100);
					ui_->progressBar_skew->setValue(skewGood*100);
					if((int)imagePoints_[id].size() > ui_->progressBar_count->maximum())
					{
						ui_->progressBar_count->setMaximum((int)imagePoints_[id].size());
					}
					ui_->progressBar_count->setValue((int)imagePoints_[id].size());
				}
				else
				{
					ui_->progressBar_x_2->setValue(xGood*100);
					ui_->progressBar_y_2->setValue(yGood*100);
					ui_->progressBar_size_2->setValue(sizeGood*100);
					ui_->progressBar_skew_2->setValue(skewGood*100);

					if((int)imagePoints_[id].size() > ui_->progressBar_count_2->maximum())
					{
						ui_->progressBar_count_2->setMaximum((int)imagePoints_[id].size());
					}
					ui_->progressBar_count_2->setValue((int)imagePoints_[id].size());
				}

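				// Enable calibration once enough samples cover the image in x, y, size and skew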
				if(imagePoints_[id].size() >= COUNT_MIN && xGood > 0.5 && yGood > 0.5 && sizeGood > 0.4 && skewGood > 0.5)
				{
					readyToCalibrate[id] = true;
				}

				//update IR values
				if(inputRawImages[id].type() == CV_16UC1)
				{
					//update min max IR if the chessboard was found
					minIrs_[id] = 0xFFFF;
					maxIrs_[id] = 0;
					for(size_t i = 0; i < pointBuf[id].size(); ++i)
					{
						const cv::Point2f &p = pointBuf[id][i];
						cv::Rect roi(std::max(0, (int)p.x - 3), std::max(0, (int)p.y - 3), 6, 6);

						roi.width = std::min(roi.width, inputRawImages[id].cols - roi.x);
						roi.height = std::min(roi.height, inputRawImages[id].rows - roi.y);

						//find minMax in the roi
						double min, max;
						cv::minMaxLoc(inputRawImages[id](roi), &min, &max);
						if(min < minIrs_[id])
						{
							minIrs_[id] = min;
						}
						if(max > maxIrs_[id])
						{
							maxIrs_[id] = max;
						}
					}
				}
			}
		}
	}
	ui_->label_baseline->setVisible(!depthDetected);
	ui_->label_baseline_name->setVisible(!depthDetected);

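	// Keep the stereo pair when the board is accepted in one image and at least detected in the other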
	if(stereo_ && ((boardAccepted[0] && boardFound[1]) || (boardAccepted[1] && boardFound[0])))
	{
		stereoImagePoints_[0].push_back(pointBuf[0]);
		stereoImagePoints_[1].push_back(pointBuf[1]);
		UINFO("Add stereo image points (size=%d)", (int)stereoImagePoints_[0].size());
	}

	if(!stereo_ && readyToCalibrate[0])
	{
		ui_->pushButton_calibrate->setEnabled(true);
	}
	else if(stereo_ && readyToCalibrate[0] && readyToCalibrate[1] && stereoImagePoints_[0].size())
	{
		ui_->pushButton_calibrate->setEnabled(true);
	}

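	// Display rectified images when requested and the calibration models are valid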
	if(ui_->radioButton_rectified->isChecked())
	{
		if(models_[0].isValid())
		{
			images[0] = models_[0].rectifyImage(images[0]);
		}
		if(models_[1].isValid())
		{
			images[1] = models_[1].rectifyImage(images[1]);
		}
	}
	else if(ui_->radioButton_stereoRectified->isChecked() &&
			(stereoModel_.left().isValid() &&
			stereoModel_.right().isValid()&&
			(!ui_->label_baseline->isVisible() || stereoModel_.baseline() > 0.0)))
	{
		images[0] = stereoModel_.left().rectifyImage(images[0]);
		images[1] = stereoModel_.right().rectifyImage(images[1]);
	}

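	// Overlay evenly spaced horizontal green lines (handy to visually check stereo rectification)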
	if(ui_->checkBox_showHorizontalLines->isChecked())
	{
		for(int id=0; id<(stereo_?2:1); ++id)
		{
			int step = imageSize_[id].height/16;
			for(int i=step; i<imageSize_[id].height; i+=step)
			{
				cv::line(images[id], cv::Point(0, i), cv::Point(imageSize_[id].width, i), CV_RGB(0,255,0));
			}
		}
	}

	ui_->label_left->setText(tr("%1x%2").arg(images[0].cols).arg(images[0].rows));

	//show frame
	ui_->image_view->setImage(uCvMat2QImage(images[0]).mirrored(ui_->checkBox_mirror->isChecked(), false));
	if(stereo_)
	{
		ui_->label_right->setText(tr("%1x%2").arg(images[1].cols).arg(images[1].rows));
		ui_->image_view_2->setImage(uCvMat2QImage(images[1]).mirrored(ui_->checkBox_mirror->isChecked(), false));
	}
	processingData_ = false;
}
Example #5
void OdometryViewer::processData(const rtabmap::OdometryEvent & odom)
{
	processingData_ = true;
	int quality = odom.info().inliers;

	bool lost = false;
	bool lostStateChanged = false;

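	// Background color reflects odometry state: dark red when lost, dark yellow when inliers fall below the warning threshold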
	if(odom.pose().isNull())
	{
		UDEBUG("odom lost"); // use last pose
		lostStateChanged = imageView_->getBackgroundColor() != Qt::darkRed;
		imageView_->setBackgroundColor(Qt::darkRed);
		cloudView_->setBackgroundColor(Qt::darkRed);

		lost = true;
	}
	else if(odom.info().inliers>0 &&
			qualityWarningThr_ &&
			odom.info().inliers < qualityWarningThr_)
	{
		UDEBUG("odom warn, quality(inliers)=%d thr=%d", odom.info().inliers, qualityWarningThr_);
		lostStateChanged = imageView_->getBackgroundColor() == Qt::darkRed;
		imageView_->setBackgroundColor(Qt::darkYellow);
		cloudView_->setBackgroundColor(Qt::darkYellow);
	}
	else
	{
		UDEBUG("odom ok");
		lostStateChanged = imageView_->getBackgroundColor() == Qt::darkRed;
		imageView_->setBackgroundColor(cloudView_->getDefaultBackgroundColor());
		cloudView_->setBackgroundColor(Qt::black);
	}

	timeLabel_->setText(QString("%1 s").arg(odom.info().timeEstimation));

	if(!odom.data().imageRaw().empty() &&
		!odom.data().depthOrRightRaw().empty() &&
		(odom.data().stereoCameraModel().isValid() || odom.data().cameraModels().size()))
	{
		UDEBUG("New pose = %s, quality=%d", odom.pose().prettyPrint().c_str(), quality);

		if(!odom.data().depthRaw().empty())
		{
			if(odom.data().imageRaw().cols % decimationSpin_->value() == 0 &&
			   odom.data().imageRaw().rows % decimationSpin_->value() == 0)
			{
				validDecimationValue_ = decimationSpin_->value();
			}
			else
			{
				UWARN("Decimation (%d) must be a denominator of the width and height of "
						"the image (%d/%d). Using last valid decimation value (%d).",
						decimationSpin_->value(),
						odom.data().imageRaw().cols,
						odom.data().imageRaw().rows,
						validDecimationValue_);
			}
		}
		else
		{
			validDecimationValue_ = decimationSpin_->value();
		}

		// visualization: buffering the clouds
		// Create the new cloud
		pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud;
		cloud = util3d::cloudRGBFromSensorData(
				odom.data(),
				validDecimationValue_,
				0.0f,
				voxelSpin_->value());

		if(cloud->size())
		{
			if(!odom.pose().isNull())
			{
				if(cloudView_->getAddedClouds().contains("cloudtmp"))
				{
					cloudView_->removeCloud("cloudtmp");
				}

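				// Remove the oldest clouds when the buffer exceeds the maximum cloud count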
				while(maxCloudsSpin_->value()>0 && (int)addedClouds_.size() > maxCloudsSpin_->value())
				{
					UASSERT(cloudView_->removeCloud(addedClouds_.first()));
					addedClouds_.pop_front();
				}

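				// Use the sensor data id when available, otherwise increment the local counter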
				odom.data().id()?id_=odom.data().id():++id_;
				std::string cloudName = uFormat("cloud%d", id_);
				addedClouds_.push_back(cloudName);
				UASSERT(cloudView_->addCloud(cloudName, cloud, odom.pose()));
			}
			else
			{
				cloudView_->addOrUpdateCloud("cloudtmp", cloud, lastOdomPose_);
			}
		}
	}

	if(!odom.pose().isNull())
	{
		lastOdomPose_ = odom.pose();
		cloudView_->updateCameraTargetPosition(odom.pose());
	}

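	// Display the odometry's local feature map as a point cloud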
	if(odom.info().localMap.size())
	{
		pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
		cloud->resize(odom.info().localMap.size());
		int i=0;
		for(std::multimap<int, cv::Point3f>::const_iterator iter=odom.info().localMap.begin(); iter!=odom.info().localMap.end(); ++iter)
		{
			(*cloud)[i].x = iter->second.x;
			(*cloud)[i].y = iter->second.y;
			(*cloud)[i++].z = iter->second.z;
		}
		cloudView_->addOrUpdateCloud("localmap", cloud);
	}

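	// 2D view: type 0 overlays word features (yellow), type 1 overlays corner features (red)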
	if(!odom.data().imageRaw().empty())
	{
		if(odom.info().type == 0)
		{
			imageView_->setFeatures(odom.info().words, odom.data().depthRaw(), Qt::yellow);
		}
		else if(odom.info().type == 1)
		{
			std::vector<cv::KeyPoint> kpts;
			cv::KeyPoint::convert(odom.info().refCorners, kpts);
			imageView_->setFeatures(kpts, odom.data().depthRaw(), Qt::red);
		}

		imageView_->clearLines();
		if(lost)
		{
			if(lostStateChanged)
			{
				// save state
				odomImageShow_ = imageView_->isImageShown();
				odomImageDepthShow_ = imageView_->isImageDepthShown();
			}
			imageView_->setImageDepth(uCvMat2QImage(odom.data().imageRaw()));
			imageView_->setImageShown(true);
			imageView_->setImageDepthShown(true);
		}
		else
		{
			if(lostStateChanged)
			{
				// restore state
				imageView_->setImageShown(odomImageShow_);
				imageView_->setImageDepthShown(odomImageDepthShow_);
			}

			imageView_->setImage(uCvMat2QImage(odom.data().imageRaw()));
			if(imageView_->isImageDepthShown())
			{
				imageView_->setImageDepth(uCvMat2QImage(odom.data().depthOrRightRaw()));
			}

			if(odom.info().type == 0)
			{
				if(imageView_->isFeaturesShown())
				{
					for(unsigned int i=0; i<odom.info().wordMatches.size(); ++i)
					{
						imageView_->setFeatureColor(odom.info().wordMatches[i], Qt::red); // outliers
					}
					for(unsigned int i=0; i<odom.info().wordInliers.size(); ++i)
					{
						imageView_->setFeatureColor(odom.info().wordInliers[i], Qt::green); // inliers
					}
				}
			}
		}
		if(odom.info().type == 1 && odom.info().cornerInliers.size())
		{
			if(imageView_->isFeaturesShown() || imageView_->isLinesShown())
			{
				//draw lines
				UASSERT(odom.info().refCorners.size() == odom.info().newCorners.size());
				for(unsigned int i=0; i<odom.info().cornerInliers.size(); ++i)
				{
					if(imageView_->isFeaturesShown())
					{
						imageView_->setFeatureColor(odom.info().cornerInliers[i], Qt::green); // inliers
					}
					if(imageView_->isLinesShown())
					{
						imageView_->addLine(
								odom.info().refCorners[odom.info().cornerInliers[i]].x,
								odom.info().refCorners[odom.info().cornerInliers[i]].y,
								odom.info().newCorners[odom.info().cornerInliers[i]].x,
								odom.info().newCorners[odom.info().cornerInliers[i]].y,
								Qt::blue);
					}
				}
			}
		}

		if(!odom.data().imageRaw().empty())
		{
			imageView_->setSceneRect(QRectF(0,0,(float)odom.data().imageRaw().cols, (float)odom.data().imageRaw().rows));
		}
	}

	imageView_->update();
	cloudView_->update();
	QApplication::processEvents();
	processingData_ = false;
}
Example #6
void CameraViewer::showImage(const rtabmap::SensorData & data)
{
	if(!data.imageRaw().empty() || !data.depthOrRightRaw().empty())
	{
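		// Accept the decimation value only if it evenly divides both the color and the depth/right image dimensions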
		if(data.imageRaw().cols % decimationSpin_->value() == 0 &&
		   data.imageRaw().rows % decimationSpin_->value() == 0 &&
		   data.depthOrRightRaw().cols % decimationSpin_->value() == 0 &&
		   data.depthOrRightRaw().rows % decimationSpin_->value() == 0)
		{
			validDecimationValue_ = decimationSpin_->value();
		}
		else
		{
			UWARN("Decimation (%d) must be a denominator of the width and height of "
					"the image (color=%d/%d depth=%d/%d). Using last valid decimation value (%d).",
					decimationSpin_->value(),
					data.imageRaw().cols,
					data.imageRaw().rows,
					data.depthOrRightRaw().cols,
					data.depthOrRightRaw().rows,
					validDecimationValue_);
		}
	}

	processingImages_ = true;
	if(!data.imageRaw().empty())
	{
		imageView_->setImage(uCvMat2QImage(util2d::decimate(data.imageRaw(), validDecimationValue_)));
	}
	if(!data.depthOrRightRaw().empty())
	{
		imageView_->setImageDepth(util2d::decimate(data.depthOrRightRaw(), validDecimationValue_));
	}

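	// Build and show a 3D cloud when a valid camera model is available for projection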
	if(!data.depthOrRightRaw().empty() &&
	   (data.stereoCameraModel().isValidForProjection() || (data.cameraModels().size() && data.cameraModels().at(0).isValidForProjection())))
	{
		if(showCloudCheckbox_->isChecked())
		{
			if(!data.imageRaw().empty() && !data.depthOrRightRaw().empty())
			{
				showCloudCheckbox_->setEnabled(true);
				cloudView_->addCloud("cloud", util3d::cloudRGBFromSensorData(data, validDecimationValue_, 0, 0, 0, parameters_));
			}
			else if(!data.depthOrRightRaw().empty())
			{
				showCloudCheckbox_->setEnabled(true);
				cloudView_->addCloud("cloud", util3d::cloudFromSensorData(data, validDecimationValue_, 0, 0, 0, parameters_));
			}
		}
	}

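	// Show the laser scan (downsampled with the same decimation value) in yellow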
	if(!data.laserScanRaw().isEmpty())
	{
		showScanCheckbox_->setEnabled(true);
		if(showScanCheckbox_->isChecked())
		{
			cloudView_->addCloud("scan", util3d::downsample(util3d::laserScanToPointCloud(data.laserScanRaw()), validDecimationValue_), Transform::getIdentity(), Qt::yellow);
		}
	}

	cloudView_->setVisible((showCloudCheckbox_->isEnabled() && showCloudCheckbox_->isChecked()) ||
						   (showScanCheckbox_->isEnabled() && showScanCheckbox_->isChecked()));
	if(cloudView_->isVisible())
	{
		cloudView_->update();
	}
	if(cloudView_->getAddedClouds().contains("cloud"))
	{
		cloudView_->setCloudVisibility("cloud", showCloudCheckbox_->isChecked());
	}
	if(cloudView_->getAddedClouds().contains("scan"))
	{
		cloudView_->setCloudVisibility("scan", showScanCheckbox_->isChecked());
	}

	processingImages_ = false;
}