/*! @brief Set the filters
 *
 * Given a set of filters, split each filter into its per-channel planes
 * and construct a filter engine for each plane, in preparation for convolution
 *
 * @param filters the filters
 */
void SpatialConvolutionEngine::setFilters(const vectorMat& filters) {

	const size_t N = filters.size();
	filters_.clear();
	filters_.resize(N);

	// split each filter into separate channels, and create a filter engine
	const size_t C = flen_;
	for (size_t n = 0; n < N; ++n) {
		vectorMat filtervec;
		vectorFilterEngine filter_engines(C);
		split(filters[n].reshape(C), filtervec);

		// the first C-1 channels have zero-padding at the border
		for (size_t m = 0; m < C-1; ++m) {
			Ptr<FilterEngine> fe = createLinearFilter(type_, type_,
					filtervec[m], Point(-1,-1), 0, BORDER_CONSTANT, -1, Scalar(0,0,0,0));
			filter_engines[m] = fe;
		}

		// the last channel has one-padding at the border
		Ptr<FilterEngine> fe = createLinearFilter(type_, type_,
				filtervec[C-1], Point(-1,-1), 0, BORDER_CONSTANT, -1, Scalar(1,1,1,1));
		filter_engines[C-1] = fe;
		filters_[n] = filter_engines;
	}
}
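
/* Illustration (not part of the library): the reshape/split pair above is the
 * core step. Each filter is expected to be stored as a single-channel matrix
 * whose columns interleave the C feature channels, which is what reshape(C)
 * followed by split() implies. A minimal standalone sketch of that behaviour;
 * the 5x5 cell grid and 32-channel feature length are made-up numbers, not
 * values taken from this codebase.
 */
#include <opencv2/core/core.hpp>
#include <cstdio>
#include <vector>

int main() {
	const int cells = 5, flen = 32;

	// a part filter stored channel-interleaved: 5 x (5*32), single channel
	cv::Mat filter(cells, cells * flen, CV_32FC1);
	cv::randu(filter, cv::Scalar::all(-1), cv::Scalar::all(1));

	// reinterpret as a 5x5 matrix with 32 channels (no copy), then split
	// into 32 single-channel planes, as setFilters() does for each filter
	std::vector<cv::Mat> planes;
	cv::split(filter.reshape(flen), planes);

	std::printf("planes: %d, each %d x %d\n",
			(int)planes.size(), planes[0].rows, planes[0].cols);
	return 0;
}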
template <typename T>
void HOGFeatures<T>::setFilters(const vectorMat& filters) {

    const int N = filters.size();
    filters_.clear();
    filters_.resize(N);

    // split each filter into separate channels, and create a filter engine
    const int C = flen_;//filters[0].cols/filters[0].rows;
    for (int n = 0; n < N; ++n) {
        vectorMat filtervec;
        std::vector<Ptr<FilterEngine> > filter_engines(C);
        split(filters[n].reshape(C), filtervec);

        // the first C-1 channels have zero-padding at the border
        for (int m = 0; m < C-1; ++m) {
            Ptr<FilterEngine> fe = createLinearFilter(DataType<T>::type, DataType<T>::type,
                                   filtervec[m], Point(-1,-1), 0, BORDER_CONSTANT, -1, Scalar(0,0,0,0));
            filter_engines[m] = fe;
        }

        // the last channel has one-padding at the border
        Ptr<FilterEngine> fe = createLinearFilter(DataType<T>::type, DataType<T>::type,
                               filtervec[C-1], Point(-1,-1), 0, BORDER_CONSTANT, -1, Scalar(1,1,1,1));
        filter_engines[C-1] = fe;
        filters_[n] = filter_engines;
    }
}
/*! @brief pick a single value per pixel from a stack of matrices
 *
 * Given K equally-sized matrices and an integer index matrix whose entries
 * lie in [0, K), set out(y,x) = in[idx(y,x)](y,x)
 *
 * @param in the stack of input matrices
 * @param idx the per-pixel index into the stack
 * @param out the composited output matrix
 */
template <typename T>
void DynamicProgram<T>::reducePickIndex(const vectorMat& in, const Mat& idx, Mat& out) {

	// error checking
	int K = in.size();
	if (K == 1) { in[0].copyTo(out); return; }
	double minv, maxv;
	minMaxLoc(idx, &minv, &maxv);
	assert(minv >= 0 && maxv < K);
	for (int k = 0; k < K; ++k) assert(in[k].size() == idx.size());

	// allocate the output array
	out.create(in[0].size(), in[0].type());

	// perform the indexing
	int M = in[0].rows;
	int N = in[0].cols;
	vector<const IT*> in_ptr(K);
	if (in[0].isContinuous()) { N = M*N; M = 1; }
	for (int m = 0; m < M; ++m) {
		IT* out_ptr = out.ptr<IT>(m);
		const int*   idx_ptr = idx.ptr<int>(m);
		for (int k = 0; k < K; ++k) in_ptr[k] = in[k].ptr<IT>(m);
		for (int n = 0; n < N; ++n) {
			out_ptr[n] = in_ptr[idx_ptr[n]][n];
		}
	}
}
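
/* Illustration (not part of the library): a naive reference of what
 * reducePickIndex computes, i.e. out(y,x) = in[idx(y,x)](y,x). The free
 * function and the float element type are assumptions made for the sake of a
 * self-contained example; the member above works on the class's IT type with
 * row pointers instead of at<>().
 */
#include <opencv2/core/core.hpp>
#include <cstdio>
#include <vector>

static void pickIndexNaive(const std::vector<cv::Mat>& in, const cv::Mat& idx, cv::Mat& out) {
	out.create(in[0].size(), in[0].type());
	for (int y = 0; y < idx.rows; ++y)
		for (int x = 0; x < idx.cols; ++x)
			out.at<float>(y, x) = in[idx.at<int>(y, x)].at<float>(y, x);
}

int main() {
	// two constant layers and an index map that alternates between them
	std::vector<cv::Mat> in(2);
	in[0] = cv::Mat::zeros(2, 2, CV_32FC1);
	in[1] = cv::Mat::ones (2, 2, CV_32FC1);
	cv::Mat idx = (cv::Mat_<int>(2, 2) << 0, 1, 1, 0);

	cv::Mat out;
	pickIndexNaive(in, idx, out);
	// expected output: 0 1 / 1 0
	std::printf("%g %g\n%g %g\n", out.at<float>(0, 0), out.at<float>(0, 1),
			out.at<float>(1, 0), out.at<float>(1, 1));
	return 0;
}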
/*! @brief Calculate the responses of a set of features to a set of filter experts
 *
 * A response represents the likelihood of the part appearing at each location of
 * the feature map. Parts are support vector machines (SVMs) represented as filters.
 * The convolution of a filter with a feature produces a probability density function
 * (pdf) of part location.
 * @param features the input features (at different scales, and by extension, size)
 * @param responses the vector of responses (pdfs) to return
 */
void SpatialConvolutionEngine::pdf(const vectorMat& features, vector2DMat& responses) {

	// preallocate the output
	const size_t M = features.size();
	const size_t N = filters_.size();
	responses.resize(M, vectorMat(N));

	// iterate
#ifdef _OPENMP
	#pragma omp parallel for
#endif
	for (size_t n = 0; n < N; ++n) {
		for (size_t m = 0; m < M; ++m) {
			Mat response;
			convolve(features[m], filters_[n], response, flen_);
			responses[m][n] = response;
		}
	}
}
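
/* Illustration (not part of the library): the responses returned by pdf() are
 * indexed as responses[scale][filter], each entry being a single-channel
 * likelihood map. Below is a hypothetical consumer that reports the strongest
 * activation of every filter at every scale; the typedefs are assumptions
 * intended to mirror the vectorMat/vector2DMat used above.
 */
#include <opencv2/core/core.hpp>
#include <cstdio>
#include <vector>

typedef std::vector<cv::Mat> vectorMat;
typedef std::vector<vectorMat> vector2DMat;

static void bestActivations(const vector2DMat& responses) {
	for (size_t m = 0; m < responses.size(); ++m) {
		for (size_t n = 0; n < responses[m].size(); ++n) {
			double minv, maxv;
			cv::Point maxloc;
			cv::minMaxLoc(responses[m][n], &minv, &maxv, NULL, &maxloc);
			std::printf("scale %d, filter %d: peak %f at (%d, %d)\n",
					(int)m, (int)n, maxv, maxloc.x, maxloc.y);
		}
	}
}

int main() {
	// one scale, one filter, dummy response map
	vector2DMat responses(1, vectorMat(1, cv::Mat::ones(4, 4, CV_32FC1)));
	bestActivations(responses);
	return 0;
}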
template <typename T>
void HOGFeatures<T>::pdf(const vectorMat& features, vector2DMat& responses) {

    // preallocate the output
    int M = features.size();
    int N = filters_.size();
    responses.resize(M, vectorMat(N));
    // iterate
#ifdef _OPENMP
    omp_set_num_threads(8);
    #pragma omp parallel for
#endif
    for (int n = 0; n < N; ++n) {
        for (int m = 0; m < M; ++m) {
            Mat response;
            convolve(features[m], filters_[n], response, flen_);
            responses[m][n] = response;
        }
    }
}
    /*! @brief element-wise max and argmax over a stack of matrices
     *
     * Given K equally-sized matrices, compute for every pixel the maximum
     * value across the stack (maxv) and the index of the matrix that
     * produced it (maxi)
     */
    static void reduceMax(const vectorMat& in, cv::Mat& maxv, cv::Mat& maxi) {

        // TODO: flatten the input into a multi-channel matrix for faster indexing
        // error checking
        const unsigned int K = in.size();
        if (K == 1) {
            // just return
            in[0].copyTo(maxv);
            maxi = cv::Mat::zeros(in[0].size(), cv::DataType<int>::type);
            return;
        }

        assert (K > 1);
        for (unsigned int k = 1; k < K; ++k) assert(in[k].size() == in[k-1].size());

        // allocate the output matrices
        maxv.create(in[0].size(), in[0].type());
        maxi.create(in[0].size(), cv::DataType<int>::type);

        unsigned int M = in[0].rows;
        unsigned int N = in[0].cols;

        std::vector<const T*> in_ptr(K);
        if (in[0].isContinuous()) {
            N = M*N;
            M = 1;
        }
        for (unsigned int m = 0; m < M; ++m) {
            T* maxv_ptr = maxv.ptr<T>(m);
            int* maxi_ptr = maxi.ptr<int>(m);
            for (unsigned int k = 0; k < K; ++k) in_ptr[k] = in[k].ptr<T>(m);
            for (unsigned int n = 0; n < N; ++n) {
                T v = -std::numeric_limits<T>::infinity();
                int i = 0;
                for (unsigned int k = 0; k < K; ++k) if (in_ptr[k][n] > v) {
                        i = k;
                        v = in_ptr[k][n];
                    }
                maxi_ptr[n] = i;
                maxv_ptr[n] = v;
            }
        }
    }
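
/* Illustration (not part of the library): a naive, standalone reference of
 * what reduceMax computes, assuming T = float. For every pixel it records the
 * maximum value across the stack (maxv) and the index of the matrix that
 * supplied it (maxi); that integer maxi map is presumably what feeds a
 * routine such as reducePickIndex above.
 */
#include <opencv2/core/core.hpp>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
    // two score maps: the argmax should pick in[1] at (0,0) and in[0] at (0,1)
    std::vector<cv::Mat> in(2);
    in[0] = (cv::Mat_<float>(1, 2) << 0.2f, 0.9f);
    in[1] = (cv::Mat_<float>(1, 2) << 0.7f, 0.1f);

    cv::Mat maxv(1, 2, CV_32FC1), maxi(1, 2, CV_32SC1);
    for (int x = 0; x < 2; ++x) {
        float v = -std::numeric_limits<float>::infinity();
        int i = 0;
        for (int k = 0; k < 2; ++k)
            if (in[k].at<float>(0, x) > v) { v = in[k].at<float>(0, x); i = k; }
        maxv.at<float>(0, x) = v;
        maxi.at<int>(0, x) = i;
    }

    // expected: maxv = [0.7 0.9], maxi = [1 0]
    std::printf("maxv: %g %g  maxi: %d %d\n",
                maxv.at<float>(0, 0), maxv.at<float>(0, 1),
                maxi.at<int>(0, 0), maxi.at<int>(0, 1));
    return 0;
}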