Ejemplo n.º 1
0
QgsVectorLayer *QgsAuxiliaryLayer::toSpatialLayer() const
{
  // Build an in-memory layer carrying the auxiliary fields, but with the
  // geometry type and CRS of the joined target layer.
  QgsVectorLayer *spatialLayer = QgsMemoryProviderUtils::createMemoryLayer( QStringLiteral( "auxiliary_layer" ), fields(), mLayer->wkbType(), mLayer->crs() );

  const QString pkField = mJoinInfo.targetFieldName();

  spatialLayer->startEditing();

  QgsFeature auxFeature;
  QgsFeature targetFeature;
  QgsFeatureIterator auxIt = getFeatures();
  while ( auxIt.nextFeature( auxFeature ) )
  {
    // Locate the matching feature on the target layer through the join key.
    QgsFeatureRequest request;
    request.setFilterExpression( QgsExpression::createFieldEqualityExpression( pkField, auxFeature.attribute( AS_JOINFIELD ) ) );
    mLayer->getFeatures( request ).nextFeature( targetFeature );

    if ( targetFeature.isValid() )
    {
      // Copy the auxiliary attributes and attach the target's geometry.
      QgsFeature spatialFeature( auxFeature );
      spatialFeature.setGeometry( targetFeature.geometry() );
      spatialLayer->addFeature( spatialFeature );
    }
  }

  spatialLayer->commitChanges();

  return spatialLayer;
}
Ejemplo n.º 2
0
unsigned int mitk::PlanarFigure::AddFeature( const char *featureName, const char *unitName )
{
  // The feature is appended, so its index equals the container size
  // before insertion.
  const unsigned int newIndex = m_Features.size();

  m_Features.push_back( Feature( featureName, unitName ) );

  return newIndex;
}
Ejemplo n.º 3
0
// Round-trip test: converts the project's "points" layer to an offline
// GeoPackage copy, edits it offline (adds one feature), synchronizes back,
// and verifies the original layer reflects the edit. Relies on the
// QgsProject singleton holding exactly the layers set up by the fixture.
void TestQgsOfflineEditing::createGeopackageAndSynchronizeBack()
{
  offlineDbFile = "TestQgsOfflineEditing.gpkg";
  QCOMPARE( mpLayer->name(), QStringLiteral( "points" ) );
  QCOMPARE( mpLayer->featureCount(), numberOfFeatures );
  QCOMPARE( mpLayer->fields().size(), numberOfFields );
  // Remember the first feature so attribute values can be compared after
  // the offline conversion and again after synchronizing back.
  QgsFeature firstFeatureBeforeAction;
  QgsFeatureIterator it = mpLayer->getFeatures();
  it.nextFeature( firstFeatureBeforeAction );

  connect( mOfflineEditing, &QgsOfflineEditing::warning, this, []( const QString & title, const QString & message ) { qDebug() << title << message; } );

  //set on LayerTreeNode showFeatureCount property
  QgsLayerTreeLayer *layerTreelayer = QgsProject::instance()->layerTreeRoot()->findLayer( mpLayer->id() );
  layerTreelayer->setCustomProperty( QStringLiteral( "showFeatureCount" ), 1 );
  layerTreelayer->setItemVisibilityChecked( false );

  //convert
  mOfflineEditing->convertToOfflineProject( offlineDataPath, offlineDbFile, layerIds, false, QgsOfflineEditing::GPKG );

  // Conversion replaces the project layer with the offline copy; re-fetch it.
  mpLayer = qobject_cast<QgsVectorLayer *>( QgsProject::instance()->mapLayers().first() );
  QCOMPARE( mpLayer->name(), QStringLiteral( "points (offline)" ) );
  QCOMPARE( mpLayer->featureCount(), numberOfFeatures );
  //comparing with the number +1 because GPKG created an fid
  QCOMPARE( mpLayer->fields().size(), numberOfFields + 1 );
  //check LayerTreeNode showFeatureCount property
  // Custom properties and visibility must survive the offline conversion.
  layerTreelayer = QgsProject::instance()->layerTreeRoot()->findLayer( mpLayer->id() );
  QCOMPARE( layerTreelayer->customProperty( QStringLiteral( "showFeatureCount" ), 0 ).toInt(), 1 );
  QCOMPARE( layerTreelayer->isVisible(), false );

  QgsFeature firstFeatureInAction;
  it = mpLayer->getFeatures();
  it.nextFeature( firstFeatureInAction );

  //compare some values
  QCOMPARE( firstFeatureInAction.attribute( QStringLiteral( "Class" ) ).toString(), firstFeatureBeforeAction.attribute( QStringLiteral( "Class" ) ).toString() );
  QCOMPARE( firstFeatureInAction.attribute( QStringLiteral( "Heading" ) ).toString(), firstFeatureBeforeAction.attribute( QStringLiteral( "Heading" ) ).toString() );
  QCOMPARE( firstFeatureInAction.attribute( QStringLiteral( "Cabin Crew" ) ).toString(), firstFeatureBeforeAction.attribute( QStringLiteral( "Cabin Crew" ) ).toString() );

  // Edit while offline: add a single new feature.
  QgsFeature newFeature( mpLayer->dataProvider()->fields() );
  newFeature.setAttribute( QStringLiteral( "Class" ), QStringLiteral( "Superjet" ) );
  mpLayer->startEditing();
  mpLayer->addFeature( newFeature );
  mpLayer->commitChanges();
  QCOMPARE( mpLayer->featureCount(), numberOfFeatures + 1 );

  //unset on LayerTreeNode showFeatureCount property
  layerTreelayer->setCustomProperty( QStringLiteral( "showFeatureCount" ), 0 );

  //synchronize back
  mOfflineEditing->synchronize();

  // Synchronization swaps the offline copy back for the original layer.
  mpLayer = qobject_cast<QgsVectorLayer *>( QgsProject::instance()->mapLayers().first() );
  QCOMPARE( mpLayer->name(), QStringLiteral( "points" ) );
  QCOMPARE( mpLayer->dataProvider()->featureCount(), numberOfFeatures + 1 );
  QCOMPARE( mpLayer->fields().size(), numberOfFields );
  //check LayerTreeNode showFeatureCount property
  layerTreelayer = QgsProject::instance()->layerTreeRoot()->findLayer( mpLayer->id() );
  QCOMPARE( layerTreelayer->customProperty( QStringLiteral( "showFeatureCount" ), 0 ).toInt(), 0 );

  //get last feature
  // NOTE(review): assumes the synchronized feature gets id featureCount-1 —
  // this depends on the provider's id assignment; verify against the backend.
  QgsFeature f = mpLayer->getFeature( mpLayer->dataProvider()->featureCount() - 1 );
  qDebug() << "FID:" << f.id() << "Class:" << f.attribute( "Class" ).toString();
  QCOMPARE( f.attribute( QStringLiteral( "Class" ) ).toString(), QStringLiteral( "Superjet" ) );

  QgsFeature firstFeatureAfterAction;
  it = mpLayer->getFeatures();
  it.nextFeature( firstFeatureAfterAction );

  QCOMPARE( firstFeatureAfterAction, firstFeatureBeforeAction );

  //and delete the feature again
  // Clean up so subsequent tests see the original feature count.
  QgsFeatureIds idsToClean;
  idsToClean << f.id();
  mpLayer->dataProvider()->deleteFeatures( idsToClean );
  QCOMPARE( mpLayer->dataProvider()->featureCount(), numberOfFeatures );
}
Ejemplo n.º 4
0
/// Initializes the SOM's training state from a parameter map and the
/// training feature set: validates parameters, derives the alpha/radius
/// decay constants, seeds the grid with blank features, and normalizes
/// the inputs.
///
/// Required keys in somParameters: "maxEpochs" (int), "initialAlpha"
/// (double), "initialRadiusRatio" (double in (0, 0.5)).
/// Throws via SOMError::requireCondition when a parameter is missing or
/// cannot be converted.
void SOM::initializeTraining( QMap<QString, QVariant> somParameters, QVector<FeaturePtr> features )
{
    SOMError::requireCondition( features.count() > 0, "Cannot train with an empty feature set" );

    int featureSize = features.front()->size();
    SOMError::requireCondition( featureSize > 0 , "Feature size must be greater than 0" );

    bool ok = true;

    // Describes the number of epochs of training the SOM will need
    SOMError::requireCondition( somParameters.contains( "maxEpochs" ), "SOM parameters doesn't contain epoch count" );
    maxEpochs = somParameters["maxEpochs"].toInt( &ok );
    SOMError::requireCondition( ok, "Couldn't convert maximum epoch count to an int" );

    // Describes the inital training weight for the SOM
    SOMError::requireCondition(
                somParameters.contains( "initialAlpha" ),
                "SOM parameters doesn't contain initial alpha"
                );
    initialAlpha = somParameters["initialAlpha"].toDouble( &ok );
    /// @todo  Add range check to alpha (hint on values)
    /// @todo  Perhaps use default values if the parameters aren't specified
    SOMError::requireCondition( ok, "Couldn't convert initial alpha to a double" );

    // Describes the inital radius of the training neighborhood for the SOM
    SOMError::requireCondition(
                somParameters.contains( "initialRadiusRatio" ),
                "SOM parameters doesn't contain initial radius ratio"
                );
    double initialRadiusRatio = somParameters["initialRadiusRatio"].toDouble( &ok );
    // Fixed typo in error message ("convvert" -> "convert").
    SOMError::requireCondition( ok, "Couldn't convert initial radius ratio to a double" );
    SOMError::requireCondition( initialRadiusRatio < 0.5, "Initial radius ratio may not exceed 0.5" );
    SOMError::requireCondition( initialRadiusRatio > 0.0, "Initial radius ratio must be greater than 0" );


    /// @todo  determine if there should be other constraints to alpha and radius ratio (negatives, ffs!)

    // Exponential-decay constants for the learning rate: alpha reaches the
    // fraction alpha_Nf of its initial value after alpha_tf * maxEpochs epochs.
    alpha_tf = 0.10;   // This is a tuning factor...should not be hardcoded
    alpha_Nf = 0.25;   // This is a tuning factor...should not be hardcoded
    alpha_gamma = -log( alpha_Nf ) / ( alpha_tf * maxEpochs );

    // Same decay scheme for the neighborhood radius, scaled off the grid diagonal.
    initialRadius = initialRadiusRatio * _grid->diagonal();
    radius_tf = 0.25;   // This is a tuning factor...should not be hardcoded
    radius_Nf = 0.50;   // This is a tuning factor...should not be hardcoded
    radius_gamma = -log( radius_Nf ) / ( radius_tf * maxEpochs );

    // Start at -1 so the first nextEpoch() call lands on epoch 0.
    currentEpoch = -1;
    nextEpoch();

    // Calculate the normalizer given the training features
    _normalizer->calculateNormalizer( features );

    // Fill every grid cell with a fresh (normalizer-initialized) feature.
    for( int i=0; i<_grid->capacity(); i++ )
    {
        FeaturePtr newFeature( new Feature( featureSize ) );
        _normalizer->setFeature( newFeature );
        _grid->item(i) = newFeature;
    }

    // Normalize the input features
    _normalizer->normalizeAll( features );
}
Ejemplo n.º 5
0
// Builds a synthetic planar map: scatters random features on the z=0 plane,
// then creates one keyframe per input pose, projecting every feature into it
// (with Gaussian pixel noise) and fitting/refining a homography per frame.
//
// posesR / posesCenter: per-frame ground-truth rotation and camera center
// (translation is derived as t = -R * center).
// Returns the populated map; ownership is transferred to the caller.
//
// Fixes vs. previous revision:
//  - Guarded the verbose maxCoeff() stats: the error vectors are only filled
//    by code that is commented out, and Eigen asserts on maxCoeff() of an
//    empty array.
//  - Removed the pessimizing `std::move` on the returned local unique_ptr
//    (implicit move applies; clang-tidy performance-move).
std::unique_ptr<Map> SceneGenerator::generateFromPoses(const std::vector<Eigen::Matrix3fr> &posesR, const std::vector<Eigen::Vector3f> &posesCenter)
{
	std::unique_ptr<Map> newMap(new Map());

	newMap->mGroundTruthCamera.reset(new CameraModel(*mCamera));

	// Placeholder images and descriptor used to init keyframes/measurements.
	cv::Mat3b nullImg3(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	cv::Mat1b nullImg1(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	Eigen::Matrix<uchar, 1, 32> nullDescr;
	nullDescr.setZero();

	float expectedPixelNoiseStd = std::max(0.3f, mNoiseStd);

	//Features
	// Sample uniformly over the image, backproject, and drop onto the z=0 plane.
	const int kFeatureCount = 1000;
	std::uniform_real_distribution<float> distributionx(0, mCamera->getImageSize()[0]);
	std::uniform_real_distribution<float> distributiony(0, mCamera->getImageSize()[1]);
	for (int i = 0; i < kFeatureCount; i++)
	{
		std::unique_ptr<Feature> newFeature(new Feature());

		//Get position
		Eigen::Vector2f imagePos(distributionx(mRandomDevice), distributiony(mRandomDevice));
		Eigen::Vector2f xn = mCamera->unprojectToWorld(imagePos).hnormalized();

		newFeature->mGroundTruthPosition3D = Eigen::Vector3f(xn[0], xn[1], 0);
		newFeature->mPosition3D = newFeature->mGroundTruthPosition3D;
		//newFeature->setPosition(imagePos);

		newMap->addFeature(std::move(newFeature));
	}

	//Matlab log of poses
	if (mLogScene)
	{
		//Eigen::MatrixX3f points(newMap->getFeatures().size(), 3);
		//for (int i = 0; i < points.rows(); i++)
		//{
		//	points.row(i) = newMap->getFeatures()[i]->mPosition3D.transpose();
		//}
		//MatlabDataLog::Instance().AddValue("K", mCamera->getK());
		//MatlabDataLog::Instance().AddValue("X", points.transpose());
		//for (int i = 0; i < posesR.size(); i++)
		//{
		//	MatlabDataLog::Instance().AddCell("R", posesR[i]);
		//	MatlabDataLog::Instance().AddCell("center", posesCenter[i]);
		//}
		//return std::unique_ptr<Map>();
	}

	//Frames
	std::normal_distribution<float> error_distribution(0, mNoiseStd);
	for (int i = 0; i < (int)posesR.size(); i++)
	{
		std::unique_ptr<Keyframe> newFrame(new Keyframe());

		newFrame->init(nullImg3, nullImg1);

		Eigen::Vector3f poseT = -posesR[i] * posesCenter[i];

		newFrame->mGroundTruthPose3DR = posesR[i];
		newFrame->mGroundTruthPose3DT = poseT;

		//Stats
		std::vector<float> distortionError;
		std::vector<float> noiseError;

		//Points
		std::vector<Eigen::Vector2f> refPoints, imgPoints;
		for (int j = 0; j< (int)newMap->getFeatures().size(); j++)
		{
			auto &feature = *newMap->getFeatures()[j];

			//Project
			Eigen::Vector3f xn = posesR[i] * feature.mPosition3D + poseT;
			Eigen::Vector2f imagePosClean = mCamera->projectFromWorld(xn);
			Eigen::Vector2f noise(error_distribution(mRandomDevice), error_distribution(mRandomDevice));
			Eigen::Vector2f imagePos = imagePosClean + noise;

			//Position
			// The first frame defines each feature's reference image position.
			if (i == 0)
				feature.setPosition(imagePos);

			//Skip if outside of image
			if (imagePos[0] < 0 || imagePos[1] < 0 || imagePos[0] > mCamera->getImageSize()[0] || imagePos[1] > mCamera->getImageSize()[1])
				continue;

			//Save distortion and noise errors
			if (mVerbose)
			{
				//Eigen::Vector2f imagePosNoDistortion = mCamera->projectFromDistorted(xn.hnormalized());
				//distortionError.push_back((imagePosClean - imagePosNoDistortion).norm());
				//noiseError.push_back(noise.norm());
			}

			//Measurement
			// The frame holds a non-owning pointer; the feature owns the measurement.
			std::unique_ptr<FeatureMeasurement> m(new FeatureMeasurement(&feature, newFrame.get(), imagePos, 0, nullDescr.data()));
			newFrame->getMeasurements().push_back(m.get());
			feature.getMeasurements().push_back(std::move(m));

			//Save match
			refPoints.push_back(feature.getPosition());
			imgPoints.push_back(imagePos);
		}

		//Write stats
		if (mVerbose)
		{
			MYAPP_LOG << "Frame " << i << "\n";
			if (newFrame->getMeasurements().empty())
				MYAPP_LOG << "AAAAHHHH NO MEASUREMENTS!\n";
			MYAPP_LOG << "  Measurement count: " << newFrame->getMeasurements().size() << "\n";
			// maxCoeff() on an empty Eigen array asserts, and these vectors are
			// only populated by the commented-out code above — guard them.
			if (!distortionError.empty())
			{
				Eigen::Map<Eigen::ArrayXf> _distortionError(distortionError.data(), distortionError.size());
				MYAPP_LOG << "  Max distortion error: " << _distortionError.maxCoeff() << "\n";
			}
			if (!noiseError.empty())
			{
				Eigen::Map<Eigen::ArrayXf> _noiseError(noiseError.data(), noiseError.size());
				MYAPP_LOG << "  Max noise error: " << _noiseError.maxCoeff() << "\n";
			}
		}


		//Get homography
		Eigen::Matrix<uchar, Eigen::Dynamic, 1> mask(refPoints.size());
		cv::Mat1b mask_cv(refPoints.size(), 1, mask.data());

		HomographyRansac ransac;
		ransac.setParams(3 * expectedPixelNoiseStd, 10, 100, (int)(0.9f * newFrame->getMeasurements().size()));
		ransac.setData(newFrame->getMeasurements());
		ransac.doRansac();

		Eigen::Matrix3fr H = ransac.getBestModel().cast<float>();

		//Refine
		HomographyEstimation hest;
		std::vector<bool> inliersVec;
		std::vector<float> scales(imgPoints.size(), 1);
		H = hest.estimateCeres(H, imgPoints, refPoints, scales, 3 * expectedPixelNoiseStd, inliersVec);

		//Set final
		newFrame->setPose(H);

		//Add
		newMap->addKeyframe(std::move(newFrame));
	}

	// Local unique_ptr is implicitly moved on return (no std::move needed).
	return newMap;
}