BPPottsPotential(const float* features1, const float* features2, int D, int N1, int N2, float w, bool per_pixel_normalization=true) :N1_(N1), N2_(N2), w_(w) { float * features = new float[ (N1_+N2_)*D ]; memset( features, 0, (N1_+N2_)*D*sizeof(float) ); memcpy( features , features1, N1_*D*sizeof(float) ); memcpy( features+N1_*D, features2, N2_*D*sizeof(float) ); lattice_.init( features, D, N1_+N2_ ); delete [] features; norm_ = allocate( N2_ ); float * tmp = allocate( N1_ ); for( int i=0; i<N1_; i++ ) tmp[i] = 1; // Compute the normalization factor lattice_.compute( norm_, tmp, 1, 0, N1_, N1_, N2_ ); if( per_pixel_normalization ){ // use a per pixel normalization for( int i=0; i<N2_; i++ ) norm_[i] = 1.f / (norm_[i]+1e-20f); } else{ float mean_norm = 0; for( int i=0; i<N2_; i++ ) mean_norm += norm_[i]; mean_norm = N2_ / mean_norm; // use a per pixel normalization for( int i=0; i<N2_; i++ ) norm_[i] = mean_norm; } deallocate( tmp ); }
// Gradient of a^T * K~ * b with respect to the pairwise features, where K~
// is the lattice kernel K combined with the normalization selected by
// ntype_.  Each normalized case has two contributions: the direct kernel
// gradient, and a correction term (r) coming from the dependence of the
// normalization vector norm_ on the features.
MatrixXf featureGradient( const MatrixXf & a, const MatrixXf & b ) const {
	if (ntype_ == NO_NORMALIZATION )
		// K~ = K: plain kernel gradient, no correction needed.
		return kernelGradient( a, b );
	else if (ntype_ == NORMALIZE_SYMMETRIC ) {
		// K~ = diag(norm_) K diag(norm_).
		// fa = K^T (diag(norm_) a), fb = K (diag(norm_) b); the cubic power of
		// norm_ reflects the 1/sqrt normalization used for this mode.
		MatrixXf fa = lattice_.compute( a*norm_.asDiagonal(), true );
		MatrixXf fb = lattice_.compute( b*norm_.asDiagonal() );
		MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
		VectorXf norm3 = norm_.array()*norm_.array()*norm_.array();
		// Correction term: symmetric average of the two one-sided products.
		MatrixXf r = kernelGradient( 0.5*( a.array()*fb.array() + fa.array()*b.array() ).matrix()*norm3.asDiagonal(), ones );
		return - r + kernelGradient( a*norm_.asDiagonal(), b*norm_.asDiagonal() );
	}
	else if (ntype_ == NORMALIZE_AFTER ) {
		// K~ = diag(norm_) K: normalization applied to the filter output.
		MatrixXf fb = lattice_.compute( b );
		MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
		VectorXf norm2 = norm_.array()*norm_.array();
		MatrixXf r = kernelGradient( ( a.array()*fb.array() ).matrix()*norm2.asDiagonal(), ones );
		return - r + kernelGradient( a*norm_.asDiagonal(), b );
	}
	else /*if (ntype_ == NORMALIZE_BEFORE )*/ {
		// K~ = K diag(norm_): normalization applied to the filter input.
		MatrixXf fa = lattice_.compute( a, true );
		MatrixXf ones = MatrixXf::Ones( a.rows(), a.cols() );
		VectorXf norm2 = norm_.array()*norm_.array();
		MatrixXf r = kernelGradient( ( fa.array()*b.array() ).matrix()*norm2.asDiagonal(), ones );
		return -r+kernelGradient( a, b*norm_.asDiagonal() );
	}
}
// Apply the (normalized) filter to `in`, writing the result into `out`.
// With `transpose` the adjoint filter K^T is applied; for the asymmetric
// normalization modes this swaps which side the scaling happens on.
void filter( MatrixXf & out, const MatrixXf & in, bool transpose ) const {
	// Does the normalization scale the input, the output, or both?
	const bool pre_scale  = (ntype_ == NORMALIZE_SYMMETRIC) ||
	                        (ntype_ == (transpose ? NORMALIZE_AFTER : NORMALIZE_BEFORE));
	const bool post_scale = (ntype_ == NORMALIZE_SYMMETRIC) ||
	                        (ntype_ == (transpose ? NORMALIZE_BEFORE : NORMALIZE_AFTER));
	// Read in the (optionally scaled) values
	if( pre_scale )
		out = in*norm_.asDiagonal();
	else
		out = in;
	// Filter through the lattice
	if( transpose )
		lattice_.compute( out, out, true );
	else
		lattice_.compute( out, out );
	// Normalize again on the output side if required
	if( post_scale )
		out = out*norm_.asDiagonal();
}
// Potts potential over N points with D-dimensional features, weighted by w.
// Precomputes the normalization vector norm_ from the filter response to an
// all-ones signal.
PottsPotential(const float* features, int D, int N, float w, bool per_pixel_normalization=true) : N_(N), w_(w) {
	lattice_.init( features, D, N );
	// Filter an all-ones signal to measure the per-point kernel mass.
	norm_ = allocate( N );
	for( int k=0; k<N; k++ )
		norm_[k] = 1;
	lattice_.compute( norm_, norm_, 1 );
	if( per_pixel_normalization ){
		// Invert each response individually (epsilon avoids division by zero).
		for( int k=0; k<N; k++ )
			norm_[k] = 1.f / (norm_[k]+1e-20f);
	}
	else {
		// Use a single factor: the inverse of the mean response.
		float total = 0;
		for( int k=0; k<N; k++ )
			total += norm_[k];
		const float inv_mean = N / total;
		for( int k=0; k<N; k++ )
			norm_[k] = inv_mean;
	}
}
// Initialize the permutohedral lattice from the feature matrix f (one column
// per variable) and precompute the normalization vector norm_ according to
// the normalization mode ntype_.
void initLattice( const MatrixXf & f ) {
	const int N = f.cols();
	lattice_.init( f );
	// Filter response to an all-ones signal = per-variable kernel mass.
	norm_ = lattice_.compute( VectorXf::Ones( N ).transpose() ).transpose();
	switch( ntype_ ){
	case NO_NORMALIZATION: {
			// Keep the kernel unnormalized, but rescale by the inverse mean
			// response so the overall magnitude stays comparable.
			float total = 0;
			for( int k=0; k<N; k++ )
				total += norm_[k];
			const float inv_mean = N / total;
			for( int k=0; k<N; k++ )
				norm_[k] = inv_mean;
		}
		break;
	case NORMALIZE_SYMMETRIC:
		// Symmetric normalization uses 1/sqrt on both sides of the kernel.
		for( int k=0; k<N; k++ )
			norm_[k] = 1.0 / sqrt(norm_[k]+1e-20);
		break;
	default: // NORMALIZE_BEFORE / NORMALIZE_AFTER
		for( int k=0; k<N; k++ )
			norm_[k] = 1.0 / (norm_[k]+1e-20);
		break;
	}
}
// Compute d/df a^T*K*b MatrixXf kernelGradient( const MatrixXf & a, const MatrixXf & b ) const { MatrixXf g = 0*f_; lattice_.gradient( g.data(), a.data(), b.data(), a.rows() ); return g; }
// Filter in_values through the lattice (using tmp as scratch space of the
// same size) and accumulate the weighted, normalized response into
// out_values.  value_size is the number of channels per point.
void apply(float* out_values, const float* in_values, float* tmp, int value_size) const {
	lattice_.compute( tmp, in_values, value_size );
	int k = 0;
	for( int i=0; i<N_; i++ ){
		// Per-point scale is constant across channels; hoist it.
		const float scale = w_*norm_[i];
		for( int j=0; j<value_size; j++, k++ )
			out_values[k] += scale*tmp[k];
	}
}