Example #1
void ImageWidget::paintEvent(QPaintEvent *)
{
  QRectF desiredRect[4];
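  /* the four tiles of a 2x2 grid, each desiredSize pixels:
     bottom-right, bottom-left, top-left and top-right */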
  desiredRect[0] = QRectF(  QPointF(desiredSize.width(), desiredSize.height()), 
                            QPointF(2.0*desiredSize.width(), 2.0*desiredSize.height()));
  desiredRect[1] = QRectF(  QPointF(0.0, desiredSize.height()), 
                            QPointF(desiredSize.width(), 2.0*desiredSize.height()));
  desiredRect[2] = QRectF(  QPointF(0.0, 0.0), 
                            QPointF(desiredSize.width(), desiredSize.height()));
  desiredRect[3] = QRectF(  QPointF(desiredSize.width(), 0.0), 
                            QPointF(2.0*desiredSize.width(), desiredSize.height()));

  QPainter painter;
  QPen pen(Qt::red, 10, Qt::SolidLine, Qt::RoundCap, Qt::RoundJoin);

  /* source rectangle covering the full loaded image, and one tile of the target grid */
  QRectF defaultRect(0.0, 0.0, imgSize.width(), imgSize.height());
  QRectF appRect(0.0, 0.0, desiredSize.width(), desiredSize.height());

  /* load each of the four images and paint it into its quadrant of the widget */
  for( int i = 0; i < 4; ++i)
  {
    /* load the image */
    image.load(fileName[i]);
    
    painter.begin(&image);
    painter.drawPixmap(imagePos(image), QPixmap::fromImage(image));
    painter.end();
    
    /* get the area ready to draw in */
    painter.begin(this);
    painter.setCompositionMode(QPainter::CompositionMode_Source);
    painter.drawImage(desiredRect[i], image, defaultRect);
    painter.end();
  }
  
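  /* draw the lines stored in 'lines' on top of the composed images */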
  for (std::list<QLine>::iterator it = lines.begin(); it != lines.end(); ++it)
  {
    painter.begin(this);
    painter.setPen(pen);
    painter.drawLine(*it);
    painter.end();
  }
}
Example #2
//! [11]
void ImageComposer::loadImage(const QString &fileName, QImage *image,
                              QToolButton *button)
{
    image->load(fileName);

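    // compose the loaded image onto a transparent, fixed-size canvas so that
    // every thumbnail shares the same geometry (resultSize)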
    QImage fixedImage(resultSize, QImage::Format_ARGB32_Premultiplied);
    QPainter painter(&fixedImage);
    painter.setCompositionMode(QPainter::CompositionMode_Source);
    painter.fillRect(fixedImage.rect(), Qt::transparent);
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(imagePos(*image), *image);
    painter.end();
    button->setIcon(QPixmap::fromImage(fixedImage));

    *image = fixedImage;

    recalculateResult();
}
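
For context, a minimal sketch of how loadImage() might be called; the slot name chooseSource(), the members sourceImage and sourceButton, and the file-dialog wiring are assumptions for illustration, not part of the example above:

#include <QFileDialog>

// Hypothetical caller: ask the user for a file and load it into the "source"
// slot of the composer (member names are illustrative only).
void ImageComposer::chooseSource()
{
    const QString fileName = QFileDialog::getOpenFileName(this, tr("Choose Image"));
    if (!fileName.isEmpty())
        loadImage(fileName, &sourceImage, sourceButton);
}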
Example #3
void PreviewColorPickerTool::CalcCorrection(hugin_utils::FDiff2D pos)
{
    m_red=0;
    m_blue=0;
    m_count=0;
    HuginBase::Panorama* pano=helper->GetPanoramaPtr();
    HuginBase::UIntSet activeImages=pano->getActiveImages();
    if(activeImages.size()>0)
    {
        for(HuginBase::UIntSet::iterator it=activeImages.begin();it!=activeImages.end();++it)
        {
            //check if point is inside the image, check also all 4 corners of rectangle
            HuginBase::PTools::Transform trans;
            trans.createTransform(pano->getImage(*it),pano->getOptions());
            double x;
            double y;
            if(trans.transformImgCoord(x,y,pos.x,pos.y))
            {
                vigra::Point2D imagePos(x,y);
                if(pano->getImage(*it).isInside(imagePos) && 
                    pano->getImage(*it).isInside(imagePos + vigra::Point2D(-ColorPickerSize,-ColorPickerSize)) &&
                    pano->getImage(*it).isInside(imagePos + vigra::Point2D(-ColorPickerSize, ColorPickerSize)) &&
                    pano->getImage(*it).isInside(imagePos + vigra::Point2D( ColorPickerSize,-ColorPickerSize)) &&
                    pano->getImage(*it).isInside(imagePos + vigra::Point2D( ColorPickerSize, ColorPickerSize)) 
                   )
                {
                    CalcCorrectionForImage(*it,imagePos);
                };
            };
        };
    };
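    //average the accumulated red and blue factors over all images that contained the point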
    if(m_count>0)
    {
        m_red=m_red/m_count;
        m_blue=m_blue/m_count;
    };
};
Example #4
std::unique_ptr<Map> SceneGenerator::generateFromPoses(const std::vector<Eigen::Matrix3fr> &posesR, const std::vector<Eigen::Vector3f> &posesCenter)
{
	std::unique_ptr<Map> newMap(new Map());

	newMap->mGroundTruthCamera.reset(new CameraModel(*mCamera));

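	//Placeholder image planes and descriptor used for the synthetic keyframes and measurements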
	cv::Mat3b nullImg3(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	cv::Mat1b nullImg1(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	Eigen::Matrix<uchar, 1, 32> nullDescr;
	nullDescr.setZero();

	float expectedPixelNoiseStd = std::max(0.3f, mNoiseStd);

	//Features
	const int kFeatureCount = 1000;
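	//Sample feature positions uniformly over the image plane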
	std::uniform_real_distribution<float> distributionx(0, mCamera->getImageSize()[0]);
	std::uniform_real_distribution<float> distributiony(0, mCamera->getImageSize()[1]);
	for (int i = 0; i < kFeatureCount; i++)
	{
		std::unique_ptr<Feature> newFeature(new Feature());

		//Get position
		Eigen::Vector2f imagePos(distributionx(mRandomDevice), distributiony(mRandomDevice));
		Eigen::Vector2f xn = mCamera->unprojectToWorld(imagePos).hnormalized();

		newFeature->mGroundTruthPosition3D = Eigen::Vector3f(xn[0], xn[1], 0);
		newFeature->mPosition3D = newFeature->mGroundTruthPosition3D;
		//newFeature->setPosition(imagePos);

		newMap->addFeature(std::move(newFeature));
	}

	//Matlab log of poses
	if (mLogScene)
	{
		//Eigen::MatrixX3f points(newMap->getFeatures().size(), 3);
		//for (int i = 0; i < points.rows(); i++)
		//{
		//	points.row(i) = newMap->getFeatures()[i]->mPosition3D.transpose();
		//}
		//MatlabDataLog::Instance().AddValue("K", mCamera->getK());
		//MatlabDataLog::Instance().AddValue("X", points.transpose());
		//for (int i = 0; i < posesR.size(); i++)
		//{
		//	MatlabDataLog::Instance().AddCell("R", posesR[i]);
		//	MatlabDataLog::Instance().AddCell("center", posesCenter[i]);
		//}
		//return std::unique_ptr<Map>();
	}

	//Frames
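	//Gaussian pixel noise added to each projected measurement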
	std::normal_distribution<float> error_distribution(0, mNoiseStd);
	for (int i = 0; i < (int)posesR.size(); i++)
	{
		std::unique_ptr<Keyframe> newFrame(new Keyframe());

		newFrame->init(nullImg3, nullImg1);
		
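		//Convert camera center C to translation: t = -R * C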
		Eigen::Vector3f poseT = -posesR[i] * posesCenter[i];

		newFrame->mGroundTruthPose3DR = posesR[i];
		newFrame->mGroundTruthPose3DT = poseT;

		//Stats
		std::vector<float> distortionError;
		std::vector<float> noiseError;

		//Points
		std::vector<Eigen::Vector2f> refPoints, imgPoints;
		for (int j = 0; j< (int)newMap->getFeatures().size(); j++)
		{
			auto &feature = *newMap->getFeatures()[j];

			//Project
			Eigen::Vector3f xn = posesR[i] * feature.mPosition3D + poseT;
			Eigen::Vector2f imagePosClean = mCamera->projectFromWorld(xn);
			Eigen::Vector2f noise(error_distribution(mRandomDevice), error_distribution(mRandomDevice));
			Eigen::Vector2f imagePos = imagePosClean + noise;

			//Position
			if (i == 0)
				feature.setPosition(imagePos);

			//Skip if outside of image
			if (imagePos[0] < 0 || imagePos[1] < 0 || imagePos[0] > mCamera->getImageSize()[0] || imagePos[1] > mCamera->getImageSize()[1])
				continue;

			//Save distortion and noise errors
			if (mVerbose)
			{
				//Eigen::Vector2f imagePosNoDistortion = mCamera->projectFromDistorted(xn.hnormalized());
				//distortionError.push_back((imagePosClean - imagePosNoDistortion).norm());
				//noiseError.push_back(noise.norm());
			}

			//Measurement
			std::unique_ptr<FeatureMeasurement> m(new FeatureMeasurement(&feature, newFrame.get(), imagePos, 0, nullDescr.data()));
			newFrame->getMeasurements().push_back(m.get());
			feature.getMeasurements().push_back(std::move(m));

			//Save match
			refPoints.push_back(feature.getPosition());
			imgPoints.push_back(imagePos);
		}

		//Write stats
		if (mVerbose)
		{
			MYAPP_LOG << "Frame " << i << "\n";
			if (newFrame->getMeasurements().empty())
				MYAPP_LOG << "AAAAHHHH NO MEASUREMENTS!\n";
			Eigen::Map<Eigen::ArrayXf> _distortionError(distortionError.data(), distortionError.size());
			Eigen::Map<Eigen::ArrayXf> _noiseError(noiseError.data(), noiseError.size());
			MYAPP_LOG << "  Measurement count: " << newFrame->getMeasurements().size() << "\n";
			MYAPP_LOG << "  Max distortion error: " << _distortionError.maxCoeff() << "\n";
			MYAPP_LOG << "  Max noise error: " << _noiseError.maxCoeff() << "\n";
		}


		//Get homography
		Eigen::Matrix<uchar, Eigen::Dynamic, 1> mask(refPoints.size());
		cv::Mat1b mask_cv(refPoints.size(), 1, mask.data());

		HomographyRansac ransac;
		ransac.setParams(3 * expectedPixelNoiseStd, 10, 100, (int)(0.9f * newFrame->getMeasurements().size()));
		ransac.setData(newFrame->getMeasurements());
		ransac.doRansac();

		Eigen::Matrix3fr H = ransac.getBestModel().cast<float>();

		//Refine
		HomographyEstimation hest;
		std::vector<bool> inliersVec;
		std::vector<float> scales(imgPoints.size(), 1);
		H = hest.estimateCeres(H, imgPoints, refPoints, scales, 3 * expectedPixelNoiseStd, inliersVec);

		//Set final
		newFrame->setPose(H);

		//Add
		newMap->addKeyframe(std::move(newFrame));
	}

	return newMap;
}