void DataPointsFiltersImpl<T>::BoundingBoxDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int nbPointsIn = cloud.features.cols();
	const int nbRows = cloud.features.rows();

	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		bool keepPt = false;
		const Vector point = cloud.features.col(i);

		// FIXME: improve performance by using Eigen array operations
		const bool x_in = (point(0) > xMin && point(0) < xMax);
		const bool y_in = (point(1) > yMin && point(1) < yMax);
		const bool z_in = (nbRows == 3) || (point(2) > zMin && point(2) < zMax); // 2D clouds (3 rows) skip the z test
		const bool in_box = x_in && y_in && z_in;

		if(removeInside)
			keepPt = !in_box;
		else
			keepPt = in_box;

		if(keepPt)
		{
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}

	cloud.conservativeResize(j);
}
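The FIXME above can be addressed with Eigen array operations, which evaluate all three interval tests for every point at once. A minimal sketch, assuming a 4xN homogeneous feature matrix with one point per column (the function name and the float scalar type are illustrative, not part of the library):

#include <Eigen/Dense>

// Sketch only: vectorized bounding-box mask. Each coefficient of the
// returned array is true when the corresponding column lies inside the box.
Eigen::Array<bool, 1, Eigen::Dynamic> insideBoxSketch(
	const Eigen::MatrixXf& features,
	float xMin, float xMax, float yMin, float yMax, float zMin, float zMax)
{
	const auto x = features.row(0).array();
	const auto y = features.row(1).array();
	const auto z = features.row(2).array();
	return (x > xMin) && (x < xMax)
	    && (y > yMin) && (y < yMax)
	    && (z > zMin) && (z < zMax);
}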
void DataPointsFiltersImpl<T>::MaxQuantileOnAxisDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (int(dim) >= cloud.features.rows())
		throw InvalidParameter((boost::format("MaxQuantileOnAxisDataPointsFilter: Error, filtering on dimension number %1%, larger than feature dimensionality %2%") % dim % cloud.features.rows()).str());

	const int nbPointsIn = cloud.features.cols();
	const int nbPointsOut = nbPointsIn * ratio;

	// build array
	vector<T> values;
	values.reserve(nbPointsIn);
	for (int x = 0; x < nbPointsIn; ++x)
		values.push_back(cloud.features(dim, x));

	// get the quantile value at the given ratio
	nth_element(values.begin(), values.begin() + nbPointsOut, values.end());
	const T limit = values[nbPointsOut];

	// copy towards beginning the elements we keep
	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		if (cloud.features(dim, i) < limit)
		{
			assert(j <= i);
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}
	assert(j <= nbPointsOut);

	cloud.conservativeResize(j);

}
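For reference, std::nth_element only guarantees that the element at the given position is the one a full sort would place there; nothing else is ordered. A toy, self-contained illustration with made-up values:

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
	std::vector<float> values = {5.f, 1.f, 9.f, 3.f, 7.f};
	const float ratio = 0.5f;                        // keep the lower half
	const std::size_t nth = values.size() * ratio;   // index 2
	std::nth_element(values.begin(), values.begin() + nth, values.end());
	std::cout << "limit = " << values[nth] << "\n";  // prints 5, the median
	return 0;
}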
void InspectorsImpl<T>::AbstractVTKInspector::buildGenericAttributeStream(std::ostream& stream, const std::string& attribute, const std::string& nameTag, const DataPoints& cloud, const int forcedDim)
{
	if (!cloud.descriptorExists(nameTag))
		return;
		
	const BOOST_AUTO(desc, cloud.getDescriptorViewByName(nameTag));
	assert(desc.rows() <= forcedDim);

	if(desc.rows() != 0)
	{
		if(attribute.compare("COLOR_SCALARS") == 0)
		{
			stream << attribute << " " << nameTag << " " << forcedDim << "\n";
			stream << padWithOnes(desc, forcedDim, desc.cols()).transpose();
		}
		else
		{
			stream << attribute << " " << nameTag << " float\n";
			if(attribute.compare("SCALARS") == 0)
				stream << "LOOKUP_TABLE default\n";

			stream << padWithZeros(desc, forcedDim, desc.cols()).transpose();
		}
		stream << "\n";
	}
}
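padWithZeros and padWithOnes are library helpers that grow a descriptor block to a fixed number of rows before streaming it. A plausible minimal re-implementation of the zero-padding case, for illustration only (the real helper's signature may differ):

#include <Eigen/Dense>
#include <cassert>

// Illustrative sketch: pad desc with zero rows so the result has forcedDim
// rows; a padWithOnes variant would fill the extra rows with ones instead.
Eigen::MatrixXf padWithZerosSketch(const Eigen::MatrixXf& desc, int forcedDim, int cols)
{
	assert(desc.rows() <= forcedDim && desc.cols() >= cols);
	Eigen::MatrixXf padded = Eigen::MatrixXf::Zero(forcedDim, cols);
	padded.topRows(desc.rows()) = desc.leftCols(cols);
	return padded;
}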
void ObservationDirectionDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
	const int dim(cloud.features.rows() - 1);
	const int nbPoints(cloud.features.cols());
	if (dim != 2 && dim != 3)
	{
		throw InvalidField(
			(boost::format("ObservationDirectionDataPointsFilter: Error, works only in 2 or 3 dimensions, cloud has %1% dimensions.") % dim).str()
		);
	}

	Vector center(dim);
	center[0] = centerX;
	center[1] = centerY;
	if (dim == 3)
		center[2] = centerZ;

	cloud.allocateDescriptor("observationDirections", dim);
	BOOST_AUTO(observationDirections, cloud.getDescriptorViewByName("observationDirections"));

	for (int i = 0; i < nbPoints; ++i)
	{
		// Direction from each point toward the observation center
		const Vector p(cloud.features.block(0, i, dim, 1));
		observationDirections.col(i) = center - p;
	}

}
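A hedged usage sketch: this filter is normally chained with OrientNormalsDataPointsFilter below, which consumes the observationDirections descriptor written here, so the order matters. Parameter names and the shared-pointer return type follow recent libpointmatcher and should be checked against your version; the sensor origin at (0,0,0) is an assumption:

#include <pointmatcher/PointMatcher.h>
#include <memory>

typedef PointMatcher<float> PM;

void orientNormalsTowardSensorSketch(PM::DataPoints& cloud)
{
	// Assumes the cloud already carries a "normals" descriptor
	// (e.g. produced by SurfaceNormalDataPointsFilter).
	PointMatcherSupport::Parametrizable::Parameters obsParams;
	obsParams["x"] = "0"; // assumed sensor origin
	obsParams["y"] = "0";
	obsParams["z"] = "0";
	std::shared_ptr<PM::DataPointsFilter> obsDir =
		PM::get().DataPointsFilterRegistrar.create("ObservationDirectionDataPointsFilter", obsParams);

	PointMatcherSupport::Parametrizable::Parameters orientParams;
	orientParams["towardCenter"] = "1";
	std::shared_ptr<PM::DataPointsFilter> orient =
		PM::get().DataPointsFilterRegistrar.create("OrientNormalsDataPointsFilter", orientParams);

	obsDir->inPlaceFilter(cloud);  // writes observationDirections
	orient->inPlaceFilter(cloud);  // flips normals toward the center
}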
void DataPointsFiltersImpl<T>::ShadowDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	// Check if normals are present
	if (!cloud.descriptorExists("normals"))
	{
		throw InvalidField("ShadowDataPointsFilter, Error: cannot find normals in descriptors");
	}

	const int dim = cloud.features.rows();

	const BOOST_AUTO(normals, cloud.getDescriptorViewByName("normals"));
	int j = 0;

	for(int i=0; i < cloud.features.cols(); i++)
	{
		const Vector normal = normals.col(i).normalized();
		const Vector point = cloud.features.block(0, i, dim-1, 1).normalized();

		const T value = anyabs(normal.dot(point));

		if(value > eps) // keep points whose normal is not nearly perpendicular to the viewing ray
		{
			cloud.features.col(j) = cloud.features.col(i);
			cloud.descriptors.col(j) = cloud.descriptors.col(i);
			j++;
		}
	}

	cloud.conservativeResize(j);
}
void DataPointsFiltersImpl<T>::OrientNormalsDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (!cloud.descriptorExists("normals"))
		throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find normals in descriptors.");
	if (!cloud.descriptorExists("observationDirections"))
		throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find observation directions in descriptors.");

	BOOST_AUTO(normals, cloud.getDescriptorViewByName("normals"));
	const BOOST_AUTO(observationDirections, cloud.getDescriptorViewByName("observationDirections"));
	assert(normals.rows() == observationDirections.rows());
	for (int i = 0; i < cloud.features.cols(); i++)
	{
		// Check normal orientation
		const Vector vecP = observationDirections.col(i);
		const Vector vecN = normals.col(i);
		const double scalar = vecP.dot(vecN);

		// Swap normal
		if(towardCenter)
		{
			if (scalar < 0)
				normals.col(i) = -vecN;
		}
		else
		{
			if (scalar > 0)
				normals.col(i) = -vecN;
		}
	}

}
void DataPointsFiltersImpl<T>::MaxDensityDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	typedef typename DataPoints::View View;
	typedef typename DataPoints::ConstView ConstView;

	// Force densities to be computed
	if (!cloud.descriptorExists("densities"))
	{
		throw InvalidField("MaxDensityDataPointsFilter: Error, no densities found in descriptors.");
	}

	const int nbPointsIn = cloud.features.cols();
	View densities = cloud.getDescriptorViewByName("densities");
	const T lastDensity = densities.maxCoeff();
	const int nbSaturatedPts = (densities.array() == lastDensity).count();

	// fill cloud values
	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		const T density(densities(0,i));
		if (density > maxDensity)
		{
			const float r = (float)std::rand()/(float)RAND_MAX;
			float acceptRatio = maxDensity/density;

			// Handle saturation value of density
			if (density == lastDensity)
			{
				acceptRatio = acceptRatio * (1.f - float(nbSaturatedPts)/float(nbPointsIn));
			}

			if (r < acceptRatio)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
		else
		{
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}

	cloud.conservativeResize(j);
}
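The branch above implements probabilistic thinning: a point whose local density d exceeds maxDensity survives with probability maxDensity/d, so the expected density after filtering is capped near maxDensity (with a correction for the saturated bin). The same rule in isolation, with illustrative inputs:

#include <cstdlib>
#include <vector>

// Sketch of the thinning rule: keep index i with probability
// maxDensity / density[i] when the local density exceeds the cap.
std::vector<int> thinByDensitySketch(const std::vector<float>& density, float maxDensity)
{
	std::vector<int> kept;
	for (std::size_t i = 0; i < density.size(); ++i)
	{
		const float r = static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX);
		if (density[i] <= maxDensity || r < maxDensity / density[i])
			kept.push_back(static_cast<int>(i));
	}
	return kept;
}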
void DataPointsFiltersImpl<T>::RandomSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int nbPointsIn = cloud.features.cols();

	int j = 0;
	for (int i = 0; i < nbPointsIn; i++)
	{
		const float r = (float)std::rand()/(float)RAND_MAX;
		if (r < prob)
		{
			cloud.setColFrom(j, cloud, i);
			j++;
		}
	}

	cloud.conservativeResize(j);
}
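Like MaxDensityDataPointsFilter above, this filter draws with std::rand(). Where seeding control or better statistical quality matters, the same Bernoulli draw can be written with <random>; a sketch, not the library's implementation:

#include <random>

// Keep a point with probability prob, using a caller-seeded engine so
// runs are reproducible.
bool keepPointSketch(std::mt19937& gen, float prob)
{
	std::bernoulli_distribution keep(prob);
	return keep(gen);
}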
void RemoveNaNDataPointsFilter<T>::inPlaceFilter(
	DataPoints& cloud)
{
	const int nbPointsIn = cloud.features.cols();

	int j = 0;
	for (int i = 0; i < nbPointsIn; ++i)
	{
		const BOOST_AUTO(colArray, cloud.features.col(i).array());
		const BOOST_AUTO(hasNaN, !(colArray == colArray).all());
		if (!hasNaN)
		{
			cloud.setColFrom(j, cloud, i);
			++j;
		}
	}

	cloud.conservativeResize(j);
}
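The hasNaN test relies on the IEEE 754 rule that NaN compares unequal to itself, so (colArray == colArray).all() is false exactly when the column contains a NaN; recent Eigen versions also offer a hasNaN() member for the same check. The scalar form of the idiom:

#include <cassert>
#include <limits>

int main()
{
	const float nan = std::numeric_limits<float>::quiet_NaN();
	assert(nan != nan);      // NaN is never equal to itself
	assert(1.0f == 1.0f);    // ordinary values are
	(void)nan;
	return 0;
}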
void InspectorsImpl<T>::AbstractVTKInspector::buildTensorStream(std::ostream& stream,
	const std::string& name,
	const DataPoints& ref, 
	const DataPoints& reading)
{
			
	const Matrix descRef(ref.getDescriptorByName(name));	
	const Matrix descRead(reading.getDescriptorByName(name));

	if(descRef.rows() != 0 && descRead.rows() != 0)
	{
		stream << "TENSORS " << name << " float\n";

		stream << padWithZeros(
				descRef, 9, ref.descriptors.cols()).transpose();
		stream << "\n";
		stream << padWithZeros(
				descRead, 9, reading.descriptors.cols()).transpose();
		stream << "\n";
	}
}
void SimpleSensorNoiseDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
	cloud.allocateDescriptor("simpleSensorNoise", 1);
	BOOST_AUTO(noise, cloud.getDescriptorViewByName("simpleSensorNoise"));

	switch(sensorType)
	{
	case 0: // Sick LMS-1xx
	{
		noise = computeLaserNoise(0.012, 0.0068, 0.0008, cloud.features);
		break;
	}
	case 1: // Hokuyo URG-04LX
	{
		noise = computeLaserNoise(0.028, 0.0013, 0.0001, cloud.features);
		break;
	}
	case 2: // Hokuyo UTM-30LX
	{
		noise = computeLaserNoise(0.018, 0.0006, 0.0015, cloud.features);
		break;
	}
	case 3: // Kinect / Xtion
	{
		const int dim = cloud.features.rows();
		const Matrix squaredValues(cloud.features.topRows(dim-1).colwise().norm().array().square());
		noise = squaredValues*(0.5*0.00285);
		break;
	}
	case 4: // Sick Tim3xx
	{
		noise = computeLaserNoise(0.004, 0.0053, -0.0092, cloud.features);
		break;
	}
	default:
		throw InvalidParameter(
			(boost::format("SimpleSensorNoiseDataPointsFilter: Error, cannot compute noise for sensorType id %1% .") % sensorType).str());
	}

}
void DataPointsFiltersImpl<T>::MaxDistDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (dim >= cloud.features.rows() - 1)
	{
		throw InvalidParameter(
			(boost::format("MaxDistDataPointsFilter: Error, filtering on dimension number %1%, larger than authorized axis id %2%") % dim % (cloud.features.rows() - 2)).str());
	}

	const int nbPointsIn = cloud.features.cols();
	const int nbRows = cloud.features.rows();

	int j = 0;
	if(dim == -1) // Euclidean distance
	{
		const T absMaxDist = anyabs(maxDist);
		for (int i = 0; i < nbPointsIn; i++)
		{
			if (cloud.features.col(i).head(nbRows-1).norm() < absMaxDist)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
	}
	else // Single-axis distance
	{
		for (int i = 0; i < nbPointsIn; i++)
		{
			if ((cloud.features(dim, i)) < maxDist)
			{
				cloud.setColFrom(j, cloud, i);
				j++;
			}
		}
	}

	cloud.conservativeResize(j);
}
void DataPointsFiltersImpl<T>::FixStepSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int iStep(step);
	const int nbPointsIn = cloud.features.cols();
	const int phase(rand() % iStep);

	int j = 0;
	for (int i = phase; i < nbPointsIn; i += iStep)
	{
		cloud.setColFrom(j, cloud, i);
		j++;
	}

	cloud.conservativeResize(j);

	const double deltaStep(startStep * stepMult - startStep);
	step *= stepMult;
	if (deltaStep < 0 && step < endStep)
		step = endStep;
	if (deltaStep > 0 && step > endStep)
		step = endStep;

}
typename ErrorMinimizersImpl<T>::Matrix
ErrorMinimizersImpl<T>::PointToPlaneWithCovErrorMinimizer::estimateCovariance(const DataPoints& reading, const DataPoints& reference, const Matches& matches, const OutlierWeights& outlierWeights, const TransformationParameters& transformation)
{
	int max_nbr_point = outlierWeights.cols();

	Matrix covariance(Matrix::Zero(6,6));
	Matrix J_hessian(Matrix::Zero(6,6));
	Matrix d2J_dReadingdX(Matrix::Zero(6, max_nbr_point));
	Matrix d2J_dReferencedX(Matrix::Zero(6, max_nbr_point));

	Vector reading_point(Vector::Zero(3));
	Vector reference_point(Vector::Zero(3));
	Vector normal(3);
	Vector reading_direction(Vector::Zero(3));
	Vector reference_direction(Vector::Zero(3));

	Matrix normals = reference.getDescriptorViewByName("normals");

	if (normals.rows() < 3)    // Make sure there are normals in DataPoints
		return std::numeric_limits<T>::max() * Matrix::Identity(6,6);

	T beta = -asin(transformation(2,0));
	T alpha = atan2(transformation(2,1), transformation(2,2));
	T gamma = atan2(transformation(1,0)/cos(beta), transformation(0,0)/cos(beta));
	T t_x = transformation(0,3);
	T t_y = transformation(1,3);
	T t_z = transformation(2,3);

	Vector tmp_vector_6(Vector::Zero(6));

	int valid_points_count = 0;

	for(int i = 0; i < max_nbr_point; ++i)
	{
		if (outlierWeights(0,i) > 0.0)
		{
			reading_point = reading.features.block(0,i,3,1);
			int reference_idx = matches.ids(0,i);
			reference_point = reference.features.block(0,reference_idx,3,1);

			normal = normals.block(0,reference_idx,3,1);

			T reading_range = reading_point.norm();
			reading_direction = reading_point / reading_range;
			T reference_range = reference_point.norm();
			reference_direction = reference_point / reference_range;

			T n_alpha = normal(2)*reading_direction(1) - normal(1)*reading_direction(2);
			T n_beta = normal(0)*reading_direction(2) - normal(2)*reading_direction(0);
			T n_gamma = normal(1)*reading_direction(0) - normal(0)*reading_direction(1);

			T E = normal(0)*(reading_point(0) - gamma*reading_point(1) + beta*reading_point(2) + t_x - reference_point(0));
			E +=  normal(1)*(gamma*reading_point(0) + reading_point(1) - alpha*reading_point(2) + t_y - reference_point(1));
			E +=  normal(2)*(-beta*reading_point(0) + alpha*reading_point(1) + reading_point(2) + t_z - reference_point(2));

			T N_reading = normal(0)*(reading_direction(0) - gamma*reading_direction(1) + beta*reading_direction(2));
			N_reading +=  normal(1)*(gamma*reading_direction(0) + reading_direction(1) - alpha*reading_direction(2));
			N_reading +=  normal(2)*(-beta*reading_direction(0) + alpha*reading_direction(1) + reading_direction(2));

			T N_reference = -(normal(0)*reference_direction(0) + normal(1)*reference_direction(1) + normal(2)*reference_direction(2));

			// update the hessian and d2J/dzdx
			tmp_vector_6 << normal(0), normal(1), normal(2), reading_range * n_alpha, reading_range * n_beta, reading_range * n_gamma;

			J_hessian += tmp_vector_6 * tmp_vector_6.transpose();

			tmp_vector_6 << normal(0) * N_reading, normal(1) * N_reading, normal(2) * N_reading, n_alpha * (E + reading_range * N_reading), n_beta * (E + reading_range * N_reading), n_gamma * (E + reading_range * N_reading);

			d2J_dReadingdX.block(0,valid_points_count,6,1) = tmp_vector_6;

			tmp_vector_6 << normal(0) * N_reference, normal(1) * N_reference, normal(2) * N_reference, reference_range * n_alpha * N_reference, reference_range * n_beta * N_reference, reference_range * n_gamma * N_reference;

			d2J_dReferencedX.block(0,valid_points_count,6,1) = tmp_vector_6;

			valid_points_count++;
		} // if (outlierWeights(0,i) > 0.0)
	}

	Matrix d2J_dZdX(Matrix::Zero(6, 2 * valid_points_count));
	d2J_dZdX.block(0,0,6,valid_points_count) = d2J_dReadingdX.block(0,0,6,valid_points_count);
	d2J_dZdX.block(0,valid_points_count,6,valid_points_count) = d2J_dReferencedX.block(0,0,6,valid_points_count);

	Matrix inv_J_hessian = J_hessian.inverse();

	covariance = d2J_dZdX * d2J_dZdX.transpose();
	covariance = inv_J_hessian * covariance * inv_J_hessian;

	return (sensorStdDev * sensorStdDev) * covariance;
}
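In formula form, the loop assembles and the last lines evaluate a closed-form covariance of the point-to-plane cost J around the estimated pose, in the style of Censi's ICP covariance estimate. With x = (t_x, t_y, t_z, alpha, beta, gamma) the pose parameters and z the stacked reading and reference measurements, J_hessian and d2J_dZdX above are the two derivative blocks in

\[
\operatorname{cov}(\hat{x}) \approx \sigma^2
\left(\frac{\partial^2 J}{\partial x^2}\right)^{-1}
\frac{\partial^2 J}{\partial z\,\partial x}
\left(\frac{\partial^2 J}{\partial z\,\partial x}\right)^{\!\top}
\left(\frac{\partial^2 J}{\partial x^2}\right)^{-1},
\]

with sigma playing the role of sensorStdDev.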
void CovarianceSamplingDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{	
	const std::size_t featDim(cloud.features.rows());
	assert(featDim == 4); //3D pts only
	
	//Check number of points
	const std::size_t nbPoints = cloud.getNbPoints();		
	if(nbSample >= nbPoints)
		return;
	
	//Check if there is normals info
	if (!cloud.descriptorExists("normals"))
		throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find normals in descriptors.");

	const auto& normals = cloud.getDescriptorViewByName("normals");
	
	std::vector<std::size_t> keepIndexes;
	keepIndexes.resize(nbSample);
	
	///---- Part A: as we compare the cloud with itself, the overlap is 100%, so we keep all points
	//A.1 and A.2 - Compute candidates
	std::vector<std::size_t> candidates;
	candidates.resize(nbPoints);
	
	for (std::size_t i = 0; i < nbPoints; ++i) candidates[i] = i;
	
	const std::size_t nbCandidates = candidates.size();
	
	//Compute centroid
	Vector3 center;
	for(std::size_t i = 0; i < featDim - 1; ++i) center(i) = T(0.);
	
	for (std::size_t i = 0; i < nbCandidates; ++i)
		for (std::size_t f = 0; f < 3; ++f) // center is a Vector3: only rows 0..2 hold coordinates
			center(f) += cloud.features(f,candidates[i]);

	for(std::size_t i = 0; i < 3; ++i) center(i) /= T(nbCandidates);
	
	//Compute torque normalization
	T Lnorm = 1.0;
	
	if(normalizationMethod == TorqueNormMethod::L1)
	{
		Lnorm = 1.0;
	}
	else if(normalizationMethod == TorqueNormMethod::Lavg)
	{
		Lnorm = 0.0;
		for (std::size_t i = 0; i < nbCandidates; ++i)
			Lnorm += (cloud.features.col(candidates[i]).head(3) - center).norm();
		Lnorm /= nbCandidates;
	}
	else if(normalizationMethod == TorqueNormMethod::Lmax)	
	{	
		const Vector minValues = cloud.features.rowwise().minCoeff();
		const Vector maxValues = cloud.features.rowwise().maxCoeff();
		const Vector3 radii = maxValues.head(3) - minValues.head(3);

		Lnorm = radii.maxCoeff() / 2.; //radii.mean() / 2.; 
	}
	
	//A.3 - Compute 6x6 covariance matrix + EigenVectors
	auto computeCovariance = [Lnorm, nbCandidates, &cloud, &center, &normals, &candidates](Matrix66 & cov) -> void {
			//Compute F matrix, see Eq. (4)
			Eigen::Matrix<T, 6, Eigen::Dynamic> F(6, nbCandidates);
	
			for(std::size_t i = 0; i < nbCandidates; ++i)
			{
				const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
				const Vector3 ni = normals.col(candidates[i]).head(3);
				
				//compute (1 / L) * (pi - c) x ni 
				F.template block<3, 1>(0, i) = (1. / Lnorm) * p.cross(ni);
				//set ni part
				F.template block<3, 1>(3, i) = ni;
			}

			// Compute the covariance matrix Cov = FF'
			cov = F * F.transpose(); 
		};
		
	Matrix66 covariance;
	computeCovariance(covariance);
	
	Eigen::EigenSolver<Matrix66> solver(covariance);		
	const Matrix66  eigenVe = solver.eigenvectors().real();
	const Vector6   eigenVa = solver.eigenvalues().real();
	
	///---- Part B
	//B.1 - Compute the 6-vector v for each candidate
	std::vector<Vector6, Eigen::aligned_allocator<Vector6>> v; // v[i] = [(pi-c) x ni ; ni ]'
	v.resize(nbCandidates);

	for(std::size_t i = 0; i < nbCandidates; ++i)
	{
		const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
		const Vector3 ni = normals.col(candidates[i]).head(3);
		
		v[i].template block<3, 1>(0, 0) = (1. / Lnorm) * p.cross(ni);
		v[i].template block<3, 1>(3, 0) = ni;
	}
	
	//B.2 - Compute the 6 sorted lists based on the dot product (vi . Xk) = magnitude, with Xk the kth eigenvector
	std::vector<std::list<std::pair<int, T>>> L; // for each eigenvector, a list of (index, magnitude) contributions
	L.resize(6);
	
	//sort by decreasing magnitude
	auto comp = [](const std::pair<int, T>& p1, const std::pair<int, T>& p2) -> bool {
			return p1.second > p2.second;
		};
	
	for(std::size_t k = 0; k < 6; ++k)
	{		
		for(std::size_t i = 0; i < nbCandidates; ++i )
		{
			L[k].push_back(std::make_pair(i, std::fabs( v[i].dot(eigenVe.template block<6,1>(0, k)) )));
		}
		
		L[k].sort(comp);
	}	
	
	std::vector<T> t(6, T(0.)); // sums of squared magnitudes
	std::vector<bool> sampledPoints(nbCandidates, false); // flags to avoid resampling the same point from another list
	
	///Add points iteratively until we reach the desired number of samples
	for(std::size_t i = 0; i < nbSample; ++i)
	{
		//B.3 - Equally constrain all eigenvectors:
		// the magnitude contributes to t_k, where k is the index of the least constrained eigenvector
		
		//Find least constrained vector
		std::size_t k = 0;
		for (std::size_t j = 0; j < 6; ++j)
		{
			if (t[k] > t[j])
				k = j;
		}
		// Add the point from the top of the list corresponding to the dimension to the set of samples
		while(sampledPoints[L[k].front().first])
			L[k].pop_front(); //remove already sampled point
		
		//Get index to keep
		const std::size_t idToKeep = static_cast<std::size_t>(L[k].front().first);
		L[k].pop_front();
			
		sampledPoints[idToKeep] = true; //set flag to avoid resampling
				
		//B.4 - Update the running total
		for (std::size_t k = 0; k < 6; ++k)
		{
			const T magnitude = v[idToKeep].dot(eigenVe.template block<6, 1>(0, k));
			t[k] += (magnitude * magnitude);
		}
		
		keepIndexes[i] = candidates[idToKeep];
	}

	//TODO: evaluate performance of this solution versus sorting the indexes
	// Build a map of (old index -> new index), in case we sample points near the beginning of the point cloud
	std::unordered_map<std::size_t, std::size_t> mapidx;
	std::size_t idx = 0;
	
	///(4) Sample the point cloud
	for(std::size_t id : keepIndexes)
	{
		//if the point was displaced by an earlier swap, look up its current position
		if(id<idx)
			id = mapidx[id];
		//Switch columns id and idx
		cloud.swapCols(idx, id);	
		//Maintain new index position	
		mapidx[idx] = id;
		//Update index
		++idx;
	}
	cloud.conservativeResize(nbSample);
}
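The final swap-with-remap loop is the subtle part: swapping column idx with column id can displace a point that is itself scheduled to be kept later, so mapidx records where the evicted column went. A standalone sketch of the same bookkeeping on a plain vector, with toy data:

#include <cassert>
#include <cstddef>
#include <unordered_map>
#include <utility>
#include <vector>

int main()
{
	std::vector<int> cols = {10, 11, 12, 13, 14, 15};    // one value per column
	std::vector<std::size_t> keepIndexes = {4, 0, 2};    // original columns to keep

	std::unordered_map<std::size_t, std::size_t> mapidx; // original index -> current position
	std::size_t idx = 0;
	for (std::size_t id : keepIndexes)
	{
		if (id < idx)                   // already displaced by an earlier swap
			id = mapidx[id];
		std::swap(cols[idx], cols[id]); // bring the kept point to slot idx
		mapidx[idx] = id;               // the point evicted from idx now lives at id
		++idx;
	}
	cols.resize(keepIndexes.size());
	assert((cols == std::vector<int>{14, 10, 12}));      // values of columns 4, 0, 2
	return 0;
}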
bool DataPointsNode::getValue (const String& strMemberName, String& strValue)
{
    bool bValueSet = false;
    DataPoints* pObject = dynamic_cast<DataPoints*>(m_pObject);
    if (strMemberName == L"value")
    {
        ValueObject* pValueObj = dynamic_cast<ValueObject*>(m_pObject);
        if (pValueObj)
        {
            if (!pValueObj->isNothing())
            {
                strValue = pValueObj->toString();
                bValueSet = true;
            }
        }
    }
    else if (strMemberName == L"Name")
    {
        if (pObject->hasValue_Name())
        {
            strValue = (StringObjectImpl(pObject->getName())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"Desc")
    {
        if (pObject->hasValue_Desc())
        {
            strValue = (StringObjectImpl(pObject->getDesc())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"Code")
    {
        if (pObject->hasValue_Code())
        {
            strValue = (StringObjectImpl(pObject->getCode())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"State")
    {
        if (pObject->hasValue_State())
        {
            strValue = (EnumStateTypeImpl(pObject->getState())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"PntRef")
    {
        if (pObject->hasValue_PntRef())
        {
            strValue = (StringObjectImpl(pObject->getPntRef())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"PointGeometry")
    {
        if (pObject->hasValue_PointGeometry())
        {
            strValue = (EnumPointGeometryTypeImpl(pObject->getPointGeometry())).toString();
            bValueSet = true;
        }
    }
    else if (strMemberName == L"DTMAttribute")
    {
        if (pObject->hasValue_DTMAttribute())
        {
            strValue = (EnumDTMAttributeTypeImpl(pObject->getDTMAttribute())).toString();
            bValueSet = true;
        }
    }
    return bValueSet;
}
bool DataPointsNode::setValue (const String& strMemberName, const String* pstrValue)
{
    bool bValueSet = false;
    DataPoints* pObject = dynamic_cast<DataPoints*>(m_pObject);
    if (strMemberName == L"Name")
    {
        if (!pstrValue)
        {
            pObject->resetValue_Name();
        }
        else
        {
            pObject->setName(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"Desc")
    {
        if (!pstrValue)
        {
            pObject->resetValue_Desc();
        }
        else
        {
            pObject->setDesc(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"Code")
    {
        if (!pstrValue)
        {
            pObject->resetValue_Code();
        }
        else
        {
            pObject->setCode(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"State")
    {
        if (!pstrValue)
        {
            pObject->resetValue_State();
        }
        else
        {
            pObject->setState(EnumStateTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"PntRef")
    {
        if (!pstrValue)
        {
            pObject->resetValue_PntRef();
        }
        else
        {
            pObject->setPntRef(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"PointGeometry")
    {
        if (!pstrValue)
        {
            pObject->resetValue_PointGeometry();
        }
        else
        {
            pObject->setPointGeometry(EnumPointGeometryTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    if (strMemberName == L"DTMAttribute")
    {
        if (!pstrValue)
        {
            pObject->resetValue_DTMAttribute();
        }
        else
        {
            pObject->setDTMAttribute(EnumDTMAttributeTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
            bValueSet = true;
        }
    }
    return bValueSet;
}
void DataPointsFiltersImpl<T>::VoxelGridDataPointsFilter::inPlaceFilter(DataPoints& cloud)
{
	const int numPoints(cloud.features.cols());
	const int featDim(cloud.features.rows());
	const int descDim(cloud.descriptors.rows());

	assert (featDim == 3 || featDim == 4);

	int insertDim(0);
	if (averageExistingDescriptors)
	{
		// TODO: this should be in the form of an assert
		// Validate descriptors and labels
		for(unsigned int i = 0; i < cloud.descriptorLabels.size(); i++)
			insertDim += cloud.descriptorLabels[i].span;
		if (insertDim != descDim)
			throw InvalidField("VoxelGridDataPointsFilter: Error, descriptor labels do not match descriptor data");
	}

	// TODO: Check that the voxel size is not too small, given the size of the data

	// Calculate number of divisions along each axis
	Vector minValues = cloud.features.rowwise().minCoeff();
	Vector maxValues = cloud.features.rowwise().maxCoeff();

    T minBoundX = minValues.x() / vSizeX;
    T maxBoundX = maxValues.x() / vSizeX;
    T minBoundY = minValues.y() / vSizeY;
    T maxBoundY = maxValues.y() / vSizeY;
    T minBoundZ = 0;
    T maxBoundZ = 0;

    if (featDim == 4)
    {
        minBoundZ = minValues.z() / vSizeZ;
        maxBoundZ = maxValues.z() / vSizeZ;
    }

    // the number of divisions along each axis is 1 + (bound range in voxel units);
    // the last division covers whatever space remains
    unsigned int numDivX = 1 + maxBoundX - minBoundX;
    unsigned int numDivY = 1 + maxBoundY - minBoundY;
    unsigned int numDivZ = 0;

    // If a 3D point cloud
    if (featDim == 4 )
        numDivZ = 1 + maxBoundZ - minBoundZ;

    unsigned int numVox = numDivX * numDivY;
    if ( featDim == 4)
        numVox *= numDivZ;

    // Assume point cloud is randomly ordered
    // compute a linear index of the following type
    // i, j, k are the component indices
    // nx, ny number of divisions in x and y components
    // idx = i + j * nx + k * nx * ny
    std::vector<unsigned int> indices(numPoints);

    // vector to hold the first point in a voxel;
    // this point will be overwritten in the input cloud with the output value

    std::vector<Voxel>* voxels;

    // try allocating vector. If too big return error
    try {
    	voxels = new std::vector<Voxel>(numVox);
    } catch (std::bad_alloc&) {
    	throw InvalidParameter((boost::format("VoxelGridDataPointsFilter: Memory allocation error with %1% voxels.  Try increasing the voxel dimensions.") % numVox).str());
    }


    for (int p = 0; p < numPoints; p++ )
    {
        unsigned int i = floor(cloud.features(0,p)/vSizeX - minBoundX);
        unsigned int j = floor(cloud.features(1,p)/vSizeY - minBoundY);
        unsigned int k = 0;
        unsigned int idx;
        if ( featDim == 4 )
        {
            k = floor(cloud.features(2,p)/vSizeZ - minBoundZ);
            idx = i + j * numDivX + k * numDivX * numDivY;
        }
        else
        {
            idx = i + j * numDivX;
        }

        unsigned int pointsInVox = (*voxels)[idx].numPoints + 1;

        if (pointsInVox == 1)
        {
            (*voxels)[idx].firstPoint = p;
        }

        (*voxels)[idx].numPoints = pointsInVox;

        indices[p] = idx;

    }

    // store which points contain voxel position
    std::vector<unsigned int> pointsToKeep;

    // Store voxel centroid in output
    if (useCentroid)
    {
        // Iterate through the indices and sum values to compute centroid
        for (int p = 0; p < numPoints ; p++)
        {
            unsigned int idx = indices[p];
            unsigned int firstPoint = (*voxels)[idx].firstPoint;

            // If this is the first point in the voxel, leave as is
            // if not sum up this point for centroid calculation
            if (firstPoint != p)
            {
            	// Sum up features and descriptors (if we are also averaging descriptors)

            	for (int f = 0; f < (featDim - 1); f++ )
            	{
            		cloud.features(f,firstPoint) += cloud.features(f,p);
            	}

            	if (averageExistingDescriptors) {
            		for (int d = 0; d < descDim; d++)
            		{
            			cloud.descriptors(d,firstPoint) += cloud.descriptors(d,p);
            		}
            	}
            }
        }

        // Now iterating through the voxels
        // Normalize sums to get centroid (average)
        // Some voxels may be empty and are discarded
        for( int idx = 0; idx < numVox; idx++)
        {
            unsigned int numPoints = (*voxels)[idx].numPoints;
            unsigned int firstPoint = (*voxels)[idx].firstPoint;
            if(numPoints > 0)
            {
                for ( int f = 0; f < (featDim - 1); f++ )
                    cloud.features(f,firstPoint) /= numPoints;

                if (averageExistingDescriptors) {
                	for ( int d = 0; d < descDim; d++ )
                		cloud.descriptors(d,firstPoint) /= numPoints;
                }

                pointsToKeep.push_back(firstPoint);
            }
        }
    }
    else
    {
    	// Although we don't sum over the features, we may still need to sum the descriptors
    	if (averageExistingDescriptors)
    	{
    		// Iterate through the indices and sum values to compute centroid
    		for (int p = 0; p < numPoints ; p++)
    		{
    			unsigned int idx = indices[p];
    			unsigned int firstPoint = (*voxels)[idx].firstPoint;

    			// If this is the first point in the voxel, leave as is
    			// if not sum up this point for centroid calculation
    			if (firstPoint != p)
    			{
    				for (int d = 0; d < descDim; d++ )
    				{
    					cloud.descriptors(d,firstPoint) += cloud.descriptors(d,p);
    				}
    			}
    		}
    	}

        for (int idx = 0; idx < numVox; idx++)
        {
            unsigned int numPoints = (*voxels)[idx].numPoints;
            unsigned int firstPoint = (*voxels)[idx].firstPoint;

            if (numPoints > 0)
            {
                // get back voxel indices in grid format
                // If we are in the last division, the voxel is smaller in size
                // We adjust the center as from the end of the last voxel to the bounding area
                unsigned int i = 0;
                unsigned int j = 0;
                unsigned int k = 0;
                // feature rows 0..2 hold x, y, z; row 3 is the homogeneous pad
                if (featDim == 4)
                {
                    k = idx / (numDivX * numDivY);
                    if (k == numDivZ)
                        cloud.features(2,firstPoint) = maxValues.z() - (k-1) * vSizeZ/2;
                    else
                        cloud.features(2,firstPoint) = k * vSizeZ + vSizeZ/2;
                }

                j = (idx - k * numDivX * numDivY) / numDivX;
                if (j == numDivY)
                    cloud.features(1,firstPoint) = maxValues.y() - (j-1) * vSizeY/2;
                else
                    cloud.features(1,firstPoint) = j * vSizeY + vSizeY / 2;

                i = idx - k * numDivX * numDivY - j * numDivX;
                if (i == numDivX)
                    cloud.features(0,firstPoint) = maxValues.x() - (i-1) * vSizeX/2;
                else
                    cloud.features(0,firstPoint) = i * vSizeX + vSizeX / 2;

                // Descriptors : normalize if we are averaging or keep as is
                if (averageExistingDescriptors) {
                	for ( int d = 0; d < descDim; d++ )
                		cloud.descriptors(d,firstPoint) /= numPoints;
                }

                pointsToKeep.push_back(firstPoint);
            }
        }

    }

    // deallocate memory for voxels information
    delete voxels;

	// Bring the data we keep to the front of the arrays, then
	// wipe the leftover unused space.
	std::sort(pointsToKeep.begin(), pointsToKeep.end());
	int numPtsOut = pointsToKeep.size();
	for (int i = 0; i < numPtsOut; i++){
		int k = pointsToKeep[i];
		assert(i <= k);
		cloud.features.col(i) = cloud.features.col(k);
		if (cloud.descriptors.rows() != 0)
			cloud.descriptors.col(i) = cloud.descriptors.col(k);
	}
	cloud.features.conservativeResize(Eigen::NoChange, numPtsOut);
	
	if (cloud.descriptors.rows() != 0)
		cloud.descriptors.conservativeResize(Eigen::NoChange, numPtsOut);
}
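The voxel addressing above flattens grid coordinates (i, j, k) into idx = i + j*numDivX + k*numDivX*numDivY and inverts the mapping with divisions and remainders. A small round-trip check, with arbitrary grid sizes:

#include <cassert>

int main()
{
	const unsigned nx = 4, ny = 3;       // divisions along x and y (arbitrary)
	const unsigned i = 2, j = 1, k = 5;  // a voxel with i < nx, j < ny
	const unsigned idx = i + j * nx + k * nx * ny;

	// Inverse mapping, as in the voxel-center branch above.
	const unsigned kk = idx / (nx * ny);
	const unsigned jj = (idx - kk * nx * ny) / nx;
	const unsigned ii = idx - kk * nx * ny - jj * nx;
	assert(ii == i && jj == j && kk == k);
	return 0;
}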
void DataPointsFiltersImpl<T>::SamplingSurfaceNormalDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	typedef Matrix Features;
	typedef typename DataPoints::View View;
	typedef typename DataPoints::Label Label;
	typedef typename DataPoints::Labels Labels;

	const int pointsCount(cloud.features.cols());
	const int featDim(cloud.features.rows());
	const int descDim(cloud.descriptors.rows());

	int insertDim(0);
	if (averageExistingDescriptors)
	{
		// TODO: this should be in the form of an assert
		// Validate descriptors and labels
		for(unsigned int i = 0; i < cloud.descriptorLabels.size(); i++)
			insertDim += cloud.descriptorLabels[i].span;
		if (insertDim != descDim)
			throw InvalidField("SamplingSurfaceNormalDataPointsFilter: Error, descriptor labels do not match descriptor data");
	}

	// Compute space requirement for new descriptors
	const int dimNormals(featDim-1);
	const int dimDensities(1);
	const int dimEigValues(featDim-1);
	const int dimEigVectors((featDim-1)*(featDim-1));

	// Allocate space for new descriptors
	Labels cloudLabels;
	if (keepNormals)
		cloudLabels.push_back(Label("normals", dimNormals));
	if (keepDensities)
		cloudLabels.push_back(Label("densities", dimDensities));
	if (keepEigenValues)
		cloudLabels.push_back(Label("eigValues", dimEigValues));
	if (keepEigenVectors)
		cloudLabels.push_back(Label("eigVectors", dimEigVectors));
	cloud.allocateDescriptors(cloudLabels);

	// we keep the build data on the stack for reentrant behaviour
	View cloudExistingDescriptors(cloud.descriptors.block(0,0,cloud.descriptors.rows(),cloud.descriptors.cols()));
	BuildData buildData(cloud.features, cloud.descriptors);

	// get views
	if (keepNormals)
		buildData.normals = cloud.getDescriptorViewByName("normals");
	if (keepDensities)
		buildData.densities = cloud.getDescriptorViewByName("densities");
	if (keepEigenValues)
		buildData.eigenValues = cloud.getDescriptorViewByName("eigValues");
	if (keepEigenVectors)
		buildData.eigenVectors = cloud.getDescriptorViewByName("eigVectors");
	// build the new point cloud
	buildNew(
		buildData,
		0,
		pointsCount,
		cloud.features.rowwise().minCoeff(),
		cloud.features.rowwise().maxCoeff()
	);

	// Bring the data we keep to the front of the arrays then
	// wipe the leftover unused space.
	std::sort(buildData.indicesToKeep.begin(), buildData.indicesToKeep.end());
	int ptsOut = buildData.indicesToKeep.size();
	for (int i = 0; i < ptsOut; i++){
		int k = buildData.indicesToKeep[i];
		assert(i <= k);
		cloud.features.col(i) = cloud.features.col(k);
		if (cloud.descriptors.rows() != 0)
			cloud.descriptors.col(i) = cloud.descriptors.col(k);
		if(keepNormals)
			buildData.normals->col(i) = buildData.normals->col(k);
		if(keepDensities)
			(*buildData.densities)(0,i) = (*buildData.densities)(0,k);
		if(keepEigenValues)
			buildData.eigenValues->col(i) = buildData.eigenValues->col(k);
		if(keepEigenVectors)
			buildData.eigenVectors->col(i) = buildData.eigenVectors->col(k);
	}
	cloud.features.conservativeResize(Eigen::NoChange, ptsOut);
	cloud.descriptors.conservativeResize(Eigen::NoChange, ptsOut);

	// warning if some points were dropped
	if(buildData.unfitPointsCount != 0)
		LOG_INFO_STREAM("  SamplingSurfaceNormalDataPointsFilter - Could not compute normal for " << buildData.unfitPointsCount << " pts.");
}
void DataPointsFiltersImpl<T>::SurfaceNormalDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	typedef typename DataPoints::View View;
	typedef typename DataPoints::Label Label;
	typedef typename DataPoints::Labels Labels;
	typedef typename MatchersImpl<T>::KDTreeMatcher KDTreeMatcher;
	typedef typename PointMatcher<T>::Matches Matches;

	const int pointsCount(cloud.features.cols());
	const int featDim(cloud.features.rows());
	const int descDim(cloud.descriptors.rows());

	// Validate descriptors and labels
	int insertDim(0);
	for(unsigned int i = 0; i < cloud.descriptorLabels.size(); i++)
		insertDim += cloud.descriptorLabels[i].span;
	if (insertDim != descDim)
		throw InvalidField("SurfaceNormalDataPointsFilter: Error, descriptor labels do not match descriptor data");

	// Reserve memory for new descriptors
	const int dimNormals(featDim-1);
	const int dimDensities(1);
	const int dimEigValues(featDim-1);
	const int dimEigVectors((featDim-1)*(featDim-1));
	//const int dimMatchedIds(knn);

	boost::optional<View> normals;
	boost::optional<View> densities;
	boost::optional<View> eigenValues;
	boost::optional<View> eigenVectors;
	boost::optional<View> matchedValues;

	Labels cloudLabels;
	if (keepNormals)
		cloudLabels.push_back(Label("normals", dimNormals));
	if (keepDensities)
		cloudLabels.push_back(Label("densities", dimDensities));
	if (keepEigenValues)
		cloudLabels.push_back(Label("eigValues", dimEigValues));
	if (keepEigenVectors)
		cloudLabels.push_back(Label("eigVectors", dimEigVectors));
	cloud.allocateDescriptors(cloudLabels);

	if (keepNormals)
		normals = cloud.getDescriptorViewByName("normals");
	if (keepDensities)
		densities = cloud.getDescriptorViewByName("densities");
	if (keepEigenValues)
		eigenValues = cloud.getDescriptorViewByName("eigValues");
	if (keepEigenVectors)
		eigenVectors = cloud.getDescriptorViewByName("eigVectors");
	// TODO: implement keepMatchedIds
//	if (keepMatchedIds)
//	{
//		cloud.allocateDescriptor("normals", dimMatchedIds);
//		matchedValues = cloud.getDescriptorViewByName("normals");
//	}

	// Build kd-tree
	Parametrizable::Parameters param;
	boost::assign::insert(param) ( "knn", toParam(knn) );
	boost::assign::insert(param) ( "epsilon", toParam(epsilon) );
	KDTreeMatcher matcher(param);
	matcher.init(cloud);

	Matches matches(typename Matches::Dists(knn, pointsCount), typename Matches::Ids(knn, pointsCount));
	matches = matcher.findClosests(cloud);

	// Search for surrounding points and compute descriptors
	int degenerateCount(0);
	for (int i = 0; i < pointsCount; ++i)
	{
		// Mean of nearest neighbors (NN)
		Matrix d(featDim-1, knn);
		for(int j = 0; j < int(knn); j++)
		{
			const int refIndex(matches.ids(j,i));
			d.col(j) = cloud.features.block(0, refIndex, featDim-1, 1);
		}

		const Vector mean = d.rowwise().sum() / T(knn);
		const Matrix NN = d.colwise() - mean;

		const Matrix C(NN * NN.transpose());
		Vector eigenVa = Vector::Identity(featDim-1, 1);
		Matrix eigenVe = Matrix::Identity(featDim-1, featDim-1);
		// Ensure that the matrix is suited for eigenvalues calculation
		if(keepNormals || keepEigenValues || keepEigenVectors)
		{
			if(C.fullPivHouseholderQr().rank()+1 >= featDim-1)
			{
				const Eigen::EigenSolver<Matrix> solver(C);
				eigenVa = solver.eigenvalues().real();
				eigenVe = solver.eigenvectors().real();
			}
			else
			{
				//std::cout << "WARNING: Matrix C needed for eigen decomposition is degenerated. Expected cause: no noise in data" << std::endl;
				++degenerateCount;
			}
		}

		if(keepNormals)
			normals->col(i) = computeNormal(eigenVa, eigenVe);
		if(keepDensities)
			(*densities)(0, i) = computeDensity(NN);
		if(keepEigenValues)
			eigenValues->col(i) = eigenVa;
		if(keepEigenVectors)
			eigenVectors->col(i) = serializeEigVec(eigenVe);
	}
	if (degenerateCount)
	{
		LOG_WARNING_STREAM("WARNING: Matrix C needed for eigen decomposition was degenerated in " << degenerateCount << " points over " << pointsCount << " (" << float(degenerateCount)*100.f/float(pointsCount) << " %)");
	}

}
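computeNormal is a library helper; geometrically it returns the eigenvector associated with the smallest eigenvalue of C, i.e. the direction of least variance in the neighborhood. A plausible minimal version, for illustration only (the real helper may differ in signature and details):

#include <Eigen/Dense>

// Illustrative sketch: the normal is the eigenvector belonging to the
// smallest eigenvalue of the neighborhood covariance.
Eigen::VectorXf computeNormalSketch(const Eigen::VectorXf& eigenVa,
                                    const Eigen::MatrixXf& eigenVe)
{
	int minIdx = 0;
	eigenVa.minCoeff(&minIdx);
	return eigenVe.col(minIdx);
}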