Example n. 1
0
	/**
	 * Element-wise gather across a stack of equally-sized matrices.
	 *
	 * For each pixel (r,c), out(r,c) = in[idx(r,c)](r,c): the index matrix
	 * selects which slice of the stack supplies each output element.
	 *
	 * @param in  stack of K matrices, all the same size and type
	 * @param idx CV_32S matrix of slice indices in [0, K), same size as in[k]
	 * @param out output matrix (allocated here to in[0]'s size/type)
	 */
	static void reducePickIndex(const vectorMat& in, const cv::Mat& idx, cv::Mat& out) {

		// error checking
		const unsigned int K = in.size();
		assert(K > 0); // in[0] below requires a non-empty stack
		if (K == 1) { in[0].copyTo(out); return; }
		double minv, maxv;
		minMaxLoc(idx, &minv, &maxv);
		assert(minv >= 0 && maxv < K);
		for (unsigned int k = 0; k < K; ++k) assert(in[k].size() == idx.size());

		// allocate the output array
		out.create(in[0].size(), in[0].type());

		// perform the indexing
		unsigned int M = in[0].rows;
		unsigned int N = in[0].cols;
		std::vector<const T*> in_ptr(K);
		// Flatten to a single logical row only when EVERY matrix involved is
		// continuous. Checking in[0] alone (as the original did) reads out of
		// bounds when idx, out, or any other in[k] is a non-continuous
		// submatrix view with padded rows.
		bool continuous = idx.isContinuous() && out.isContinuous();
		for (unsigned int k = 0; k < K; ++k) continuous = continuous && in[k].isContinuous();
		if (continuous) { N = M*N; M = 1; }
		for (unsigned int m = 0; m < M; ++m) {
			T* out_ptr = out.ptr<T>(m);
			const int*   idx_ptr = idx.ptr<int>(m);
			for (unsigned int k = 0; k < K; ++k) in_ptr[k] = in[k].ptr<T>(m);
			for (unsigned int n = 0; n < N; ++n) {
				out_ptr[n] = in_ptr[idx_ptr[n]][n];
			}
		}
	}
Example n. 2
0
int CSNetHandler::handle_packet(base::packet::Header& header, base::packet::InPacket& in)
{
	// Move the incoming packet's contents into a heap-allocated copy so the
	// payload outlives this call (the caller's `in` is left swapped-out).
	boost::shared_ptr<base::packet::InPacket> packet(new base::packet::InPacket());
	packet->swap(in);

	// Hand processing off to the worker pool. shared_from_this() keeps this
	// handler alive until the scheduled task has run.
	CS_THR_POOL::instance().schedule(&CSNetHandler::processTask,
	                                 this->shared_from_this(), header, packet);

	return 1;
}
Example n. 3
0
    /**
     * Element-wise max across a stack of equally-sized matrices.
     *
     * For each pixel (r,c), maxv(r,c) = max_k in[k](r,c) and maxi(r,c) is the
     * index k achieving that maximum (first winner on ties).
     *
     * @param in   stack of K matrices, all the same size and type
     * @param maxv output matrix of per-pixel maxima (in[0]'s size/type)
     * @param maxi output CV_32S matrix of per-pixel argmax slice indices
     */
    static void reduceMax(const vectorMat& in, cv::Mat& maxv, cv::Mat& maxi) {

        // TODO: flatten the input into a multi-channel matrix for faster indexing
        // error checking
        const unsigned int K = in.size();
        assert(K > 0); // in[0] below requires a non-empty stack
        if (K == 1) {
            // just return
            in[0].copyTo(maxv);
            maxi = cv::Mat::zeros(in[0].size(), cv::DataType<int>::type);
            return;
        }

        assert (K > 1);
        for (unsigned int k = 1; k < K; ++k) assert(in[k].size() == in[k-1].size());

        // allocate the output matrices
        maxv.create(in[0].size(), in[0].type());
        maxi.create(in[0].size(), cv::DataType<int>::type);

        unsigned int M = in[0].rows;
        unsigned int N = in[0].cols;

        std::vector<const T*> in_ptr(K);
        // Flatten to a single logical row only when EVERY matrix involved is
        // continuous. Checking in[0] alone (as the original did) reads out of
        // bounds when any other in[k] (or a caller-preallocated maxv/maxi
        // submatrix) has padded rows.
        bool continuous = maxv.isContinuous() && maxi.isContinuous();
        for (unsigned int k = 0; k < K; ++k) continuous = continuous && in[k].isContinuous();
        if (continuous) {
            N = M*N;
            M = 1;
        }
        for (unsigned int m = 0; m < M; ++m) {
            T* maxv_ptr = maxv.ptr<T>(m);
            int* maxi_ptr = maxi.ptr<int>(m);
            for (unsigned int k = 0; k < K; ++k) in_ptr[k] = in[k].ptr<T>(m);
            for (unsigned int n = 0; n < N; ++n) {
                // Seed the running max from slice 0 rather than -infinity:
                // numeric_limits<T>::infinity() is 0 for integral T, which
                // silently produced 0 for all-negative columns.
                T v = in_ptr[0][n];
                int i = 0;
                for (unsigned int k = 1; k < K; ++k) if (in_ptr[k][n] > v) {
                        i = k;
                        v = in_ptr[k][n];
                    }
                maxi_ptr[n] = i;
                maxv_ptr[n] = v;
            }
        }
    }
inline void
normal_diff_filter(
		const pcl::PointCloud<PointT> &in,
		pcl::PointCloud<PointT> &out,
		double scale1 = 0.40, ///The smallest scale to use in the DoN filter.
		double scale2 = 0.60, ///The largest scale to use in the DoN filter.
		double threshold = 0.6 ///The minimum DoN magnitude to threshold by
)
{
	// Validate parameters before doing any work (the original built the
	// kd-tree first and only then rejected bad scales, wasting the build).
	if (scale1 >= scale2)
	{
		std::cerr << "Error: Large scale must be > small scale!" << std::endl;
		return;
	}

	boost::shared_ptr<pcl::PointCloud<PointT> > in_ptr(in.makeShared());

	// Search structure shared by both normal-estimation passes.
	boost::shared_ptr<pcl::search::KdTree<PointT> > tree_n;
	tree_n.reset( new pcl::search::KdTree<PointT>(false) );
	tree_n->setInputCloud(in_ptr);
	tree_n->setEpsilon(0.5);

	// Compute normals using both small and large scales at each point
	pcl::NormalEstimationOMP<PointT, pcl::PointNormal> ne;
	ne.setInputCloud (in_ptr);
	ne.setSearchMethod (tree_n);

	/**
	* NOTE: setting viewpoint is very important, so that we can ensure
	* normals are all pointed in the same direction!
	*/
	ne.setViewPoint (std::numeric_limits<float>::max (), std::numeric_limits<float>::max (), std::numeric_limits<float>::max ());

	// calculate normals with the small scale
	pcl::PointCloud<pcl::PointNormal>::Ptr normals_small_scale (new pcl::PointCloud<pcl::PointNormal>);

	ne.setRadiusSearch (scale1);
	ne.compute (*normals_small_scale);

	// calculate normals with the large scale
	pcl::PointCloud<pcl::PointNormal>::Ptr normals_large_scale (new pcl::PointCloud<pcl::PointNormal>);

	ne.setRadiusSearch (scale2);
	ne.compute (*normals_large_scale);

	// Create output cloud for DoN results
	pcl::PointCloud<pcl::PointNormal>::Ptr doncloud (new pcl::PointCloud<pcl::PointNormal>);
	pcl::copyPointCloud<PointT, pcl::PointNormal>(in, *doncloud);

	// Create DoN operator
	pcl::DifferenceOfNormalsEstimation<PointT, pcl::PointNormal, pcl::PointNormal> don;
	don.setInputCloud(in_ptr);
	don.setNormalScaleLarge(normals_large_scale);
	don.setNormalScaleSmall(normals_small_scale);

	if (!don.initCompute ())
	{
		// (typo "intialize" fixed in the user-facing message)
		std::cerr << "Error: Could not initialize DoN feature operator" << std::endl;
		return;
	}

	// Compute DoN (magnitude is stored in the curvature field)
	don.computeFeature (*doncloud);

	// Build the condition for filtering: keep points whose DoN magnitude
	// exceeds the threshold
	pcl::ConditionOr<pcl::PointNormal>::Ptr range_cond (
			new pcl::ConditionOr<pcl::PointNormal> ()
	);
	range_cond->addComparison (pcl::FieldComparison<pcl::PointNormal>::ConstPtr (
							   new pcl::FieldComparison<pcl::PointNormal> ("curvature", pcl::ComparisonOps::GT, threshold))
							 );

	// Build the filter (second argument requests removed-index bookkeeping,
	// consumed via getRemovedIndices below)
	pcl::ConditionalRemoval<pcl::PointNormal> condrem (range_cond, true);
	condrem.setInputCloud(doncloud);

	pcl::PointCloud<pcl::PointNormal>::Ptr doncloud_filtered (new pcl::PointCloud<pcl::PointNormal>);

	// Apply filter
	condrem.filter(*doncloud_filtered);

	// Map the removed indices back onto the ORIGINAL cloud type: extract
	// everything except the points the condition removed.
	boost::shared_ptr<pcl::PointIndices> indices(new pcl::PointIndices);
	condrem.getRemovedIndices(*indices);

	//std::cout << "Indices: " << indices->indices.size() << " Filtered: " << doncloud_filtered->size () << " data points." << std::endl;

	pcl::ExtractIndices<PointT> extract;
	extract.setInputCloud(in_ptr);
	extract.setIndices(indices);
	extract.setNegative (true);
	extract.filter(out);

	// Save filtered output
	//std::cout << "In: " << in.size() << " Filtered: " << out.size () << " data points." << std::endl;
}