void CCorrelationFilters::prepare_data(struct CDataStruct *img, struct CParamStruct *params)
{
    normalize_data(img);
    img->size_data_freq = params->size_filt_freq;
    fft_data(img);
    params->num_elements_freq = img->num_elements_freq;
    compute_psd_matrix(img, params);
    compute_inverse_psd_matrix(img, params);
}
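/*
 * A minimal usage sketch for prepare_data(), assuming the CDataStruct and
 * CParamStruct members used in this file (size_data, num_data, num_channels,
 * data, labels, size_filt_freq) are filled in by the caller, that the structs
 * are default-constructible on the stack, and that the class methods are
 * publicly accessible. The memory layout expected by fft_data() for the raw
 * pixel buffer is an assumption, not confirmed by the original source.
 */
static void example_prepare(CCorrelationFilters &cf, double *pixels, double *labels,
                            int rows, int cols, int num_channels, int num_images)
{
	CDataStruct img;
	CParamStruct params;

	img.size_data = Eigen::Vector2i(rows, cols);  // spatial size of each training image
	img.num_channels = num_channels;              // e.g. 1 for grayscale, >1 for vector features
	img.num_data = num_images;                    // number of training images
	img.data = pixels;                            // assumed contiguous pixel/feature buffer
	img.labels = labels;                          // +1 / -1 per image

	params.size_filt_freq = img.size_data;        // design the filter at the image size
	cf.prepare_data(&img, &params);               // normalize, FFT, PSD and inverse PSD
}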
void CCorrelationFilters::apply_filter(struct CDataStruct *corr, struct CDataStruct *img, struct CFilterStruct *filt)
{
	int num_dim1 = img->size_data.size();
	int num_dim2 = filt->filter.size_data.size();
	
	assert(num_dim1 == num_dim2 && "Something wrong, filter and image have different rank!!");
    assert(img->num_channels==filt->filter.num_channels && "Something wrong, filter and image have different number of channels!!");
	
	int *siz = new int[num_dim1];
	
	for (int i=0; i<num_dim1; i++) {
//		siz[i] = img->size_data(i)+filt->filter.size_data(i)-1;
        siz[i] = img->size_data(i);
	}
	
	Eigen::Map<Eigen::VectorXi> size_data_freq(siz,num_dim1);
	img->size_data_freq = size_data_freq;
	filt->filter.size_data_freq = size_data_freq;
	delete[] siz; // the assignments above copy the sizes, so the raw buffer is no longer needed
	
	fft_data(img);
	fft_data(&filt->filter);
    
	double *data;
	data = new double[img->num_data*filt->filter.num_data*img->size_data_freq.prod()];
	corr->data = data;
    corr->size_data_freq = img->size_data_freq;
    corr->size_data = corr->size_data_freq;
	
	int num_elements_freq = img->num_elements_freq;
	int num_elements_data = img->size_data_freq.prod();
	
	Eigen::ArrayXXcd tmp1 = Eigen::ArrayXcd::Zero(num_elements_freq*img->num_channels,1);
	Eigen::ArrayXXcd tmp2 = Eigen::ArrayXcd::Zero(num_elements_freq*filt->filter.num_channels,1);
	Eigen::ArrayXcd temp = Eigen::ArrayXcd::Zero(num_elements_freq);
	Eigen::Map<Eigen::ArrayXXcd> temp1(img->data_freq,num_elements_freq*img->num_channels,img->num_data);
	Eigen::Map<Eigen::ArrayXXcd> temp2(filt->filter.data_freq,num_elements_freq*filt->filter.num_channels,filt->filter.num_data);
	
	CDataStruct tmp;
	tmp.size_data = img->size_data;
	tmp.size_data_freq = img->size_data_freq;
	tmp.num_data = 1;
	tmp.num_channels = 1;
	tmp.ptr_data.reserve(tmp.num_data);
	tmp.ptr_data_freq.reserve(tmp.num_data);
	tmp.data_freq = new complex<double>[num_elements_freq];
	
	for (int i=0; i<img->num_data; i++) {
		tmp1 = temp1.block(0,i,num_elements_freq*img->num_channels,1);
		tmp1.resize(num_elements_freq,img->num_channels);
		for (int j=0; j<filt->filter.num_data; j++) {
			tmp2 = temp2.block(0,j,num_elements_freq*img->num_channels,1);
			tmp2.resize(num_elements_freq,img->num_channels);
            
			temp.setZero();
			for (int k=0; k<img->num_channels; k++) {
				temp += tmp1.block(0,k,num_elements_freq,1)*tmp2.block(0,k,num_elements_freq,1).conjugate();
			}

			Eigen::Map<Eigen::ArrayXcd> (tmp.data_freq,num_elements_freq) = temp;
			ifft_data(&tmp);
			memcpy((corr->data + (i*filt->filter.num_data+j)*num_elements_data), tmp.data, sizeof(double)*num_elements_data);
			delete[] tmp.data;
		}
	}
	
	filt->filter.size_data_freq = filt->filter.size_data;
	img->size_data_freq = img->size_data;
}
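/*
 * Usage sketch (an assumption, not part of the library): reading the output of
 * apply_filter(). Each (image i, filter j) pair produces one correlation plane
 * of corr->size_data_freq.prod() doubles, stored at the offset used in the
 * memcpy above. The flat peak index is returned as-is; mapping it back to
 * row/column coordinates depends on the layout used by ifft_data().
 */
static double example_correlation_peak(const CDataStruct *corr, int i, int j,
                                       int num_filters, int *peak_index)
{
	int plane_size = corr->size_data_freq.prod();
	const double *plane = corr->data + (i*num_filters + j)*plane_size;

	double peak = plane[0];
	*peak_index = 0;
	for (int n = 1; n < plane_size; n++) {
		if (plane[n] > peak) {
			peak = plane[n];
			*peak_index = n;
		}
	}
	return peak;
}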
void CCorrelationFilters::build_mmcf(struct CDataStruct *img, struct CParamStruct *params, struct CFilterStruct *filt)
{
    /*
	 * This function implements the correlation filter design proposed in the following publications.
     *
	 * A. Rodriguez, Vishnu Naresh Boddeti, B.V.K. Vijaya Kumar and A. Mahalanobis, "Maximum Margin Correlation Filter: A New Approach for Localization and Classification", IEEE Transactions on Image Processing, 2012.
     *
	 * Vishnu Naresh Boddeti, "Advances in Correlation Filters: Vector Features, Structured Prediction and Shape Alignment" PhD thesis, Carnegie Mellon University, Pittsburgh, PA, USA, 2012.
     *
	 * Vishnu Naresh Boddeti and B.V.K. Vijaya Kumar, "Maximum Margin Vector Correlation Filters," Arxiv 1404.6031 (April 2014).
	 *
	 * Notes: This is currently the best performing Correlation Filter design, especially when the training sample size is larger than the dimensionality of the data. A usage sketch follows this function.
	 */
	
	filt->params = *params;
	filt->filter.size_data = params->size_filt_freq;
	filt->filter.size_data_freq = params->size_filt_freq;
	
	filt->filter.num_elements_freq = img->num_elements_freq;
	params->num_elements_freq = img->num_elements_freq;
	filt->filter.data_freq = new complex<double>[img->num_elements_freq*img->num_channels];
	
	Eigen::ArrayXcd filt_freq = Eigen::ArrayXcd::Zero(params->num_elements_freq*img->num_channels);
	
	// If not set, default to 1.
	if (params->wpos < 1) params->wpos = 1;
	filt->params.wpos = params->wpos;
	
	compute_psd_matrix(img, params);
	Eigen::MatrixXcd Y = Eigen::MatrixXcd::Zero(img->num_elements_freq*img->num_channels,img->num_data);
	Eigen::MatrixXcd u = Eigen::MatrixXcd::Zero(img->num_data,1);
	Eigen::MatrixXd temp = Eigen::MatrixXd::Zero(img->num_data,img->num_data);
	
	Eigen::Map<Eigen::MatrixXcd> X(img->data_freq,img->num_elements_freq*img->num_channels,img->num_data);
	
	Eigen::ArrayXXcd temp1 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::ArrayXXcd temp2 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::Vector2i num_blocks_1, num_blocks_2;
	
	num_blocks_1 << img->num_channels,img->num_channels;
	num_blocks_2 << img->num_channels,1;
	
	for (int k=0;k<img->num_data;k++){
        
        temp2 = X.block(0,k,img->num_elements_freq*img->num_channels,1).array();
        temp2.resize(img->num_elements_freq,img->num_channels);
        fusion_matrix_multiply(temp1, img->Sinv, temp2, num_blocks_1, num_blocks_2);
        temp1.resize(img->num_elements_freq*img->num_channels,1);
        Y.block(0,k,img->num_elements_freq*img->num_channels,1) = temp1.matrix();
        temp1.resize(img->num_elements_freq,img->num_channels);
        
		if (img->labels[k] == 1)
		{
			u(k) = std::complex<double>(params->wpos,0);
		}
		else
		{
			u(k) = std::complex<double>(-1,0);
		}
	}
	
	esvm::SVMClassifier libsvm;
	
	libsvm.setC(params->C);
	libsvm.setKernel(params->kernel_type);
	libsvm.setWpos(params->wpos);
	
	temp = (X.conjugate().transpose()*Y).real();
	Eigen::Map<Eigen::MatrixXd> y(img->labels,img->num_data,1);
	
	libsvm.train(temp, y);
	temp.resize(0,0);
	
	int nSV;
	libsvm.getNSV(&nSV);
	Eigen::VectorXi sv_indices = Eigen::VectorXi::Zero(nSV);
	Eigen::VectorXd sv_coef = Eigen::VectorXd::Zero(nSV);
	libsvm.getSI(sv_indices);
	libsvm.getCoeff(sv_coef);
	
	for (int k=0; k<nSV; k++) {
		filt_freq += (Y.block(0,sv_indices[k]-1,img->num_elements_freq*img->num_channels,1)*sv_coef[k]).array();
	}
	
	Y.resize(0,0);
	
	Eigen::Map<Eigen::ArrayXcd>(filt->filter.data_freq,img->num_elements_freq*img->num_channels) = filt_freq;
	filt->filter.num_data = 1;
	filt->filter.num_channels = img->num_channels;
	filt->filter.ptr_data.reserve(filt->filter.num_data);
	filt->filter.ptr_data_freq.reserve(filt->filter.num_data);
	ifft_data(&filt->filter);
    fft_data(&filt->filter);
}
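/*
 * Usage sketch (hedged, not from the original source): training an MMCF after
 * prepare_data() has been called on the training set. The parameter fields
 * (C, kernel_type, wpos) are taken from their use in build_mmcf() above; the
 * concrete values here are illustrative only.
 */
static void example_train_mmcf(CCorrelationFilters &cf, CDataStruct *img,
                               CParamStruct *params, CFilterStruct *filt)
{
	params->C = 1.0;        // SVM regularization constant, forwarded to libsvm.setC()
	params->wpos = 1.0;     // weight on positive samples, forwarded to libsvm.setWpos()
	// params->kernel_type should also be set; it is forwarded to libsvm.setKernel().
	cf.build_mmcf(img, params, filt);   // filt->filter now holds the MMCF in space and frequency
}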
void CCorrelationFilters::build_otsdf(struct CDataStruct *img, struct CParamStruct *params, struct CFilterStruct *filt)
{
    /*
	 * This function implements the correlation filter design proposed in the following publications.
	 * 
     * [1] Optimal trade-off synthetic discriminant function filters for arbitrary devices, B.V.K. Kumar, D.W. Carlson, A. Mahalanobis, Optics Letters, 1994.
	 *
	 * [2] Jason Thornton, "Matching deformed and occluded iris patterns: a probabilistic model based on discriminative cues." PhD thesis, Carnegie Mellon University, Pittsburgh, PA, USA, 2007.
	 *
	 * [3] Vishnu Naresh Boddeti, Jonathon M Smereka, and B. V. K. Vijaya Kumar, "A comparative evaluation of iris and ocular recognition methods on challenging ocular images." IJCB, 2011.
	 *
     * [4] A. Mahalanobis, B.V.K. Kumar, D. Casasent, "Minimum average correlation energy filters," Applied Optics, 1987
     *
	 * Notes: This filter design is good when the dimensionality of the data is greater than the training sample size. Setting the filter parameter params->alpha=0 results in the popular MACE filter. However, it is usually better to set alpha to a small number rather than setting it to 0. If you use MACE, cite [4]. A closed-form summary and usage sketch follow this function.
	 */

	filt->params = *params;
	filt->filter.size_data = params->size_filt_freq;
	filt->filter.size_data_freq = params->size_filt_freq;

	filt->filter.num_elements_freq = img->num_elements_freq;
	params->num_elements_freq = img->num_elements_freq;
	filt->filter.data_freq = new complex<double>[img->num_elements_freq*img->num_channels];
	
	Eigen::ArrayXcd filt_freq = Eigen::ArrayXcd::Zero(params->num_elements_freq*img->num_channels);
	
	// If not set, default to 1.
	if (params->wpos < 1) params->wpos = 1;
	filt->params.wpos = params->wpos;
	
	compute_psd_matrix(img, params);
	Eigen::MatrixXcd Y = Eigen::MatrixXcd::Zero(img->num_elements_freq*img->num_channels,img->num_data);
	Eigen::MatrixXcd u = Eigen::MatrixXcd::Zero(img->num_data,1);
	Eigen::MatrixXcd temp = Eigen::MatrixXcd::Zero(img->num_data,img->num_data);
	
	Eigen::Map<Eigen::MatrixXcd> X(img->data_freq,img->num_elements_freq*img->num_channels,img->num_data);
	
	Eigen::ArrayXXcd temp1 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::ArrayXXcd temp2 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::Vector2i num_blocks_1, num_blocks_2;
	
	num_blocks_1 << img->num_channels,img->num_channels;
	num_blocks_2 << img->num_channels,1;
	
	for (int k=0;k<img->num_data;k++){
        temp2 = X.block(0,k,img->num_elements_freq*img->num_channels,1).array();
        temp2.resize(img->num_elements_freq,img->num_channels);
        fusion_matrix_multiply(temp1, img->Sinv, temp2, num_blocks_1, num_blocks_2);
        temp1.resize(img->num_elements_freq*img->num_channels,1);
        Y.block(0,k,img->num_elements_freq*img->num_channels,1) = temp1.matrix();
        temp1.resize(img->num_elements_freq,img->num_channels);
        
		if (img->labels[k] == 1)
		{
			u(k) = std::complex<double>(params->wpos,0);
		}
		else
		{
			u(k) = std::complex<double>(1,0);
		}
	}

	temp = X.conjugate().transpose()*Y;
	temp = temp.ldlt().solve(u);
	filt_freq = Y*temp;
	
	Y.resize(0,0);
	
	Eigen::Map<Eigen::ArrayXcd>(filt->filter.data_freq,img->num_elements_freq*img->num_channels) = filt_freq;
	filt->filter.num_data = 1;
	filt->filter.num_channels = img->num_channels;
	filt->filter.ptr_data.reserve(filt->filter.num_data);
	filt->filter.ptr_data_freq.reserve(filt->filter.num_data);
	ifft_data(&filt->filter);
    fft_data(&filt->filter);
}
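/*
 * For reference, the closed form computed by build_otsdf() above, in the
 * notation of the code: with X the training data in the frequency domain and
 * S the regularized power spectrum whose block-wise inverse is img->Sinv,
 *
 *     h = S^-1 X (X^+ S^-1 X)^-1 u,
 *
 * where X^+ is the conjugate transpose and u holds the per-image constraint
 * values (wpos for positives, 1 for negatives). A hedged usage sketch, with
 * illustrative parameter values only:
 */
static void example_train_otsdf(CCorrelationFilters &cf, CDataStruct *img,
                                CParamStruct *params, CFilterStruct *filt)
{
	params->alpha = 1e-3;   // the notes above suggest a small alpha; alpha = 0 gives the MACE filter
	params->wpos = 1.0;     // constraint value for positive samples
	cf.build_otsdf(img, params, filt);
}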
void CCorrelationFilters::build_uotsdf(struct CDataStruct *img, struct CParamStruct *params, struct CFilterStruct *filt)
{
	/*
	 * This function implements the correlation filter design proposed in the following publications.
     *
	 * [1] Vishnu Naresh Boddeti, Takeo Kanade and B.V.K. Vijaya Kumar, "Correlation Filters for Object Alignment," CVPR 2013.
     *
     * [2] A. Mahalanobis, B. V. K. Vijaya Kumar, S. Song, S. Sims, and J. Epperson. Unconstrained correlation filters. Applied Optics, 1994.
     *
	 * Notes: This is currently the fastest Correlation Filter design to train, and is highly amenable to real-time and online learning and to object tracking. While in [1] we use this filter with HOG features, the filter design is general enough to be used with any other vector feature representation, for example a Gabor filter bank. Setting the filter parameter params->alpha=0 results in the unconstrained MACE filter. A usage sketch follows this function.
	 */
	
	int num_pos=0,num_neg=0;
	
	filt->params = *params;
	filt->filter.size_data = params->size_filt_freq;
	filt->filter.size_data_freq = params->size_filt_freq;
	
	filt->filter.num_elements_freq = img->num_elements_freq;
	params->num_elements_freq = img->num_elements_freq;
	filt->filter.data_freq = new complex<double>[img->num_elements_freq*img->num_channels];
	
    Eigen::ArrayXXcd filt_freq = Eigen::ArrayXcd::Zero(params->num_elements_freq*img->num_channels,1);
	Eigen::ArrayXXcd pos_mean_freq = Eigen::ArrayXcd::Zero(params->num_elements_freq*img->num_channels,1);
    Eigen::ArrayXXcd neg_mean_freq = Eigen::ArrayXcd::Zero(params->num_elements_freq*img->num_channels,1);
	
	// If not set, default to 1.
	if (params->wpos < 1) params->wpos = 1;
	filt->params.wpos = params->wpos;
	
	Eigen::Map<Eigen::ArrayXXcd> X(img->data_freq,img->num_elements_freq*img->num_channels,img->num_data);
	
	Eigen::ArrayXXcd temp1 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::ArrayXXcd temp2 = Eigen::ArrayXXcd::Zero(img->num_elements_freq,img->num_channels);
	Eigen::Vector2i num_blocks_1, num_blocks_2;
	
	num_blocks_1 << img->num_channels,img->num_channels;
	num_blocks_2 << img->num_channels,1;
	
	switch ((int) params->neg_flag)
	{
        // Case 0: Use only positive images, usually super fast.
		case 0:
			for (int k=0;k<img->num_data;k++){
				if (img->labels[k] == 1)
				{
					pos_mean_freq += X.block(0,k,img->num_elements_freq*img->num_channels,1)*params->wpos;
					num_pos++;
				}
			}
			break;
        // Case 1: Use only negative images, useful for online learning purposes.
		case 1:
			for (int k=0;k<img->num_data;k++){
				if (img->labels[k] == -1)
				{
					neg_mean_freq += X.block(0,k,img->num_elements_freq*img->num_channels,1);
					num_neg++;
				}
			}
			break;
        // Case 2: Use both positive and negative images, usually gets you the best performance.
		case 2:
			for (int k=0;k<img->num_data;k++){
				if (img->labels[k] == 1)
				{
					pos_mean_freq += X.block(0,k,img->num_elements_freq*img->num_channels,1)*params->wpos;
					num_pos++;
				}
				else
				{
					neg_mean_freq += X.block(0,k,img->num_elements_freq*img->num_channels,1);
					num_neg++;
				}
			}
			break;
	}
	
    filt_freq = pos_mean_freq/(max(num_pos,1)*params->wpos)-neg_mean_freq/max(num_neg,1);
	filt_freq.resize(img->num_elements_freq,img->num_channels);
	fusion_matrix_multiply(temp1, img->Sinv, filt_freq, num_blocks_1, num_blocks_2);
	temp1.resize(img->num_elements_freq*img->num_channels,1);
	Eigen::Map<Eigen::ArrayXcd>(filt->filter.data_freq,img->num_elements_freq*img->num_channels) = temp1;
	
	filt->filter.num_data = 1;
	filt->filter.num_channels = img->num_channels;
	filt->filter.ptr_data.reserve(filt->filter.num_data);
	filt->filter.ptr_data_freq.reserve(filt->filter.num_data);
	ifft_data(&filt->filter);
    fft_data(&filt->filter);
}
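/*
 * Usage sketch (hedged): an end-to-end flow with the unconstrained filter,
 * the fastest design to train according to the notes above. The test data are
 * assumed to be set up the same way as the training data; neg_flag selects
 * which class means enter the solution, as in the switch() above.
 */
static void example_train_and_apply_uotsdf(CCorrelationFilters &cf, CDataStruct *img,
                                           CParamStruct *params, CFilterStruct *filt,
                                           CDataStruct *test, CDataStruct *corr)
{
	cf.prepare_data(img, params);        // assumes params->size_filt_freq was set by the caller
	params->neg_flag = 2;                // use both the positive and the negative class means
	cf.build_uotsdf(img, params, filt);  // roughly Sinv * (positive mean - negative mean)
	cf.apply_filter(corr, test, filt);   // correlation planes land in corr->data
}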
void SingleParticle2dx::Methods::BasicCalcCCMethod::calculateCrossCorrelation( SingleParticle2dx::DataStructures::Particle& part, SingleParticle2dx::DataStructures::Projection2d& proj, real_array2d_type& res)
{
    value_type nx = part.getSizeX();
    value_type ny = part.getSizeY();

    fft_array2d_type fft_data( boost::extents[nx][ny] );

    real_array2d_type real_data( boost::extents[nx][ny] );
    real_array2d_type real_data_shifted( boost::extents[nx][ny] );

    fft_type part_image(0,0);
    fft_type ref_image(0,0);
    value_type r2;
    value_type w;

    SingleParticle2dx::ConfigContainer* config = SingleParticle2dx::ConfigContainer::Instance();

    for (size_type i=0; i<part.getSizeX(); i++)
    {
        for (size_type j=0; j<part.getSizeY(); j++)
        {
            r2 = sqrt( (i-nx/2)*(i-nx/2) + (j-ny/2)*(j-ny/2) );
            {
                if ( (r2<config->getMaxCCFreq()) && (r2>config->getMinCCFreq()) )
                {
                    w = 1;
                }
                else if ( r2<(config->getMaxCCFreq()+config->getCCFreqSigma()) )
                {
                    value_type dr = r2-config->getMaxCCFreq();
                    w = 1 - dr/config->getCCFreqSigma();
                }
                else
                {
                    w = 0;
                }

                part_image = part(i,j);
                ref_image = proj(i,j);
                fft_data[i][j].real(part_image.real() * ref_image.real() + part_image.imag() * ref_image.imag());
                fft_data[i][j].imag(-part_image.real() * ref_image.imag() + part_image.imag() * ref_image.real());

                fft_data[i][j] *= w;
            }
        }
    }

    SingleParticle2dx::Utilities::FFTCalculator::performBackwardFFT (&fft_data, &real_data);

    size_type half_window = floor(SingleParticle2dx::ConfigContainer::Instance()->getCrossCorrelationWindowSize()/2.);

    int ii,jj;
    for (int i=0; i<nx; i++)
    {
        for (int j=0; j<ny; j++)
        {
            ii = i - nx/2;
            jj = j - ny/2;

            if (ii<0)
            {
                ii += nx;
            }

            if (jj<0)
            {
                jj += ny;
            }

            if (ii >= nx)
            {
                ii -= nx;
            }

            if (jj >= ny)
            {
                jj -= ny;
            }

            real_data_shifted[i][j] = real_data[ii][jj] / nx;
        }
    }

    res = real_data_shifted[boost::indices[range(nx/2-half_window,nx/2+half_window+1)][range(ny/2-half_window,ny/2+half_window+1)]];

}
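/*
 * Usage sketch (an assumption, not part of SingleParticle2dx): reading the
 * correlation window produced by calculateCrossCorrelation(). The window is
 * centered on zero shift, so the offset of the peak from the window center
 * gives the estimated translation in pixels.
 */
static void example_peak_shift(const real_array2d_type& res, int& dx, int& dy)
{
    std::size_t wx = res.shape()[0];
    std::size_t wy = res.shape()[1];

    std::size_t best_i = 0, best_j = 0;
    for (std::size_t i = 0; i < wx; i++)
    {
        for (std::size_t j = 0; j < wy; j++)
        {
            if (res[i][j] > res[best_i][best_j])
            {
                best_i = i;
                best_j = j;
            }
        }
    }

    dx = static_cast<int>(best_i) - static_cast<int>(wx/2);
    dy = static_cast<int>(best_j) - static_cast<int>(wy/2);
}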