// determine the most likely symbol at each position of the received word
template <class GF_q, class real> void linear_code_utils<GF_q, real>::get_most_likely_received_word(
      const array1dv_t& received_likelihoods, array1d_t & received_word_sd,
      array1gfq_t& received_word_hd)
   {
   //some helper variables
   int length_n = received_likelihoods.size();
   real mostlikely_sofar = real(0.0);
   int indx = 0;
   array1d_t tmp_vec;
   int num_of_symbs;

   received_word_sd.init(length_n);
   received_word_hd.init(length_n);

   for (int loop_n = 0; loop_n < length_n; loop_n++)
      {
      mostlikely_sofar = 0;
      indx = 0;
      tmp_vec = received_likelihoods(loop_n);
      num_of_symbs = tmp_vec.size();
      for (int loop_q = 0; loop_q < num_of_symbs; loop_q++)
         {
         if (mostlikely_sofar <= tmp_vec(loop_q))
            {
            mostlikely_sofar = tmp_vec(loop_q);
            indx = loop_q;
            }
         }
      received_word_sd(loop_n) = mostlikely_sofar;
      received_word_hd(loop_n) = GF_q(indx);
      }
   }
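The loop above is a per-position argmax over the symbol likelihoods. A standalone sketch of the same idea using plain std::vector (ignoring the library's array1dv_t/array1gfq_t types and the GF_q symbol class) might look like this:

#include <cstddef>
#include <vector>

// Hypothetical helper: for each received position, keep the largest likelihood
// (soft decision) and the index of the symbol that attains it (hard decision).
void most_likely_symbols(const std::vector<std::vector<double> >& likelihoods,
                         std::vector<double>& soft, std::vector<std::size_t>& hard)
   {
   const std::size_t length_n = likelihoods.size();
   soft.assign(length_n, 0.0);
   hard.assign(length_n, 0);
   for (std::size_t pos = 0; pos < length_n; pos++)
      {
      const std::vector<double>& symbol_probs = likelihoods[pos];
      for (std::size_t sym = 0; sym < symbol_probs.size(); sym++)
         {
         if (soft[pos] <= symbol_probs[sym])
            {
            soft[pos] = symbol_probs[sym];
            hard[pos] = sym;
            }
         }
      }
   }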
Example #2
vector<double> sub_vec(const vector<double>& vec, int first, int len) {
	// obtain the sub-vector of length len starting at index first
	vector<double>::const_iterator beg = vec.begin() + first;
	vector<double>::const_iterator end = beg + len;
	vector<double> tmp_vec(beg, end);
	return tmp_vec;
}
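A minimal usage sketch for sub_vec (assuming the helper above is in scope; the caller must keep first + len within bounds):

#include <iostream>
#include <vector>

int main() {
	std::vector<double> v = {1.0, 2.0, 3.0, 4.0, 5.0};
	std::vector<double> s = sub_vec(v, 1, 3);   // {2.0, 3.0, 4.0}
	for (double x : s)
		std::cout << x << ' ';
	std::cout << '\n';
	return 0;
}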
Example #3
void find_lis(const std::vector<int> &sequence, std::vector<int> &lis) {

	lis.clear();
	lis.push_back(0);

	std::vector<int> tmp_vec(sequence.size());
	int lower, upper;
	for(size_t i = 0; i < sequence.size(); i++) {
		// if next element is greater than last element of longest subseq
		if( sequence[lis.back()] < sequence[i] ) {
			tmp_vec[i] = lis.back();
			lis.push_back(i);
			continue;
		}
		
		// binary search for the leftmost element of lis whose value is >= sequence[i]
		for(lower = 0, upper = lis.size()-1; lower < upper;) {
			int bin_index = (lower + upper) / 2;
			if(sequence[lis[bin_index]] < sequence[i])
				lower = bin_index + 1;
			else
				upper = bin_index;
		}
		
		// update lis if the new value is smaller than the one currently at that position
		if(sequence[i] < sequence[lis[lower]]) {
			if(lower > 0)
				tmp_vec[i] = lis[lower-1];
			lis[lower] = i;
		}
	}
	
	for(lower = lis.size(), upper = lis.back(); lower--; upper = tmp_vec[upper])
		lis[lower] = upper;
}
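A usage sketch for find_lis (assuming the function above is available). Note that the output vector holds indices into the input sequence, not the values themselves:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
	std::vector<int> seq = {3, 1, 4, 1, 5, 9, 2, 6};
	std::vector<int> lis;
	find_lis(seq, lis);                  // lis now holds indices of one longest increasing subsequence
	for (std::size_t i = 0; i < lis.size(); i++)
		std::cout << seq[lis[i]] << ' ';   // prints: 1 4 5 6
	std::cout << '\n';
	return 0;
}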
Example #4
inline
typename T1::elem_type
op_median::median_vec
  (
  const T1& X,
  const typename arma_not_cx<typename T1::elem_type>::result* junk
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk);
  
  typedef typename T1::elem_type eT;

  const Proxy<T1> P(X);
  
  const uword n_elem = P.get_n_elem();
  
  arma_debug_check( (n_elem == 0), "median(): given object has no elements" );
  
  std::vector<eT> tmp_vec(n_elem);
    
  if(Proxy<T1>::prefer_at_accessor == false)
    {
    typedef typename Proxy<T1>::ea_type ea_type;
    
    ea_type A = P.get_ea();
    
    for(uword i=0; i<n_elem; ++i)
      {
      tmp_vec[i] = A[i];
      }
    }
  else
    {
    const uword n_rows = P.get_n_rows();
    const uword n_cols = P.get_n_cols();
    
    if(n_cols == 1)
      {
      for(uword row=0; row < n_rows; ++row)
        {
        tmp_vec[row] = P.at(row,0);
        }
      }
    else
    if(n_rows == 1)
      {
      for(uword col=0; col < n_cols; ++col)
        {
        tmp_vec[col] = P.at(0,col);
        }
      }
    else
      {
      arma_stop("op_median::median_vec(): expected a vector" );
      }
    }
  
  return op_median::direct_median(tmp_vec);
  }
Example #5
 const ROL::Vector<Real> & dual() const {
   uint dimension = std_vec_->size();
   std::vector<Element> tmp_vec(*std_vec_);
   for (uint i = 0; i < dimension; i++) { 
     tmp_vec[i] /= (*scaling_vec_)[i];
   }
   dual_vec_ = Teuchos::rcp( new PrimalScaledStdVector<Real>( Teuchos::rcp(new std::vector<Element>(tmp_vec)), scaling_vec_ ) );
   return *dual_vec_;
 }
Example #6
 //! Construct
 lcp_vlc(cache_config& config, std::string other_key="")
 {
     std::string lcp_key  = conf::KEY_LCP;
     if ("" != other_key) {
         lcp_key = other_key;
     }
     int_vector_buffer<> lcp_buf(cache_file_name(lcp_key, config));
     vlc_vec_type tmp_vec(lcp_buf);
     m_vec.swap(tmp_vec);
 }
Example #7
int zimg_depth_process(zimg_depth_context *ctx, const void *src, void *dst, void *,
                       int width, int height, int src_stride, int dst_stride,
                       int pixel_in, int pixel_out, int depth_in, int depth_out, int fullrange_in, int fullrange_out, int chroma)
{
	try {
		const void *src_p[3] = { src, nullptr, nullptr };
		void *dst_p[3] = { dst, nullptr, nullptr };
		ptrdiff_t src_stride_[3] = { src_stride, 0, 0 };
		ptrdiff_t dst_stride_[3] = { dst_stride, 0, 0 };

		std::unique_ptr<zimg_filter> filter;
		zimg_depth_params params;
		size_t tmp_size;
		int err;

		zimg2_depth_params_default(&params, ZIMG_API_VERSION);

		params.width = width;
		params.height = height;

		params.dither_type = ctx->type;
		params.chroma = chroma;

		params.pixel_in = pixel_in;
		params.depth_in = depth_in;
		params.range_in = fullrange_in;

		params.pixel_out = pixel_out;
		params.depth_out = depth_out;
		params.range_out = fullrange_out;

		filter.reset(zimg2_depth_create(&params));
		if (!filter)
			return g_last_error;

		if ((err = zimg2_plane_filter_get_tmp_size(filter.get(), &tmp_size)))
			return err;

		zimg::AlignedVector<char> tmp_vec(tmp_size);

		return zimg2_plane_filter_process(filter.get(), tmp_vec.data(), src_p, dst_p, src_stride_, dst_stride_);
	} catch (const zimg::ZimgException &e) {
		return handle_exception(e);
	} catch (const std::bad_alloc &e) {
		return handle_exception(e);
	}
}
Example #8
int _tmain(int argc, _TCHAR* argv[])
{
	//1.06	9.2		151		54.4	1.6
	//0.89	10.3	202		57.9	2.2
	//1.43	15.4	113		53		3.4
	//1.02	11.2	168		56		0.3
	//1.49	8.8		192		51.2	1
	//1.32	13.5	111		60		-2.2
	//1.22	12.2	175		67.6	2.2
	//1.1	9.2		245		57		3.3
	//1.34	13		168		60.4	7.2
	//1.12	12.4	197		53		2.7
	//0.75	7.5		173		51.5	6.5
	//1.13	10.9	178		62		3.7
	//1.15	12.7	199		53.7	6.4
	//1.09	12		96		49.8	1.4
	//0.96	7.6		164		62.2	-0.1
	//1.16	9.9		252		56		9.2
	//0.76	6.4		136		61.9	9
	//1.05	12.6	150		56.7	2.7
	//1.16	11.7	104		54		-2.1
	//1.2		11.8	148		59.9	3.5
	//1.04	8.6		204		61		3.5
	//1.07	9.3		174		54.3	5.9

	UINT n = 22;
	UINT k = 3;
	UINT vec_dim = 5;

	vector<ublas::vector<double>> coord_array;

	ifstream dataset_stream("dataset.txt");
	ublas::vector<double> tmp_vec(vec_dim);

	for(UINT i = 0; i < n; i++)
	{
		for(UINT j = 0; j < vec_dim; j++)
			dataset_stream >> tmp_vec(j);

		coord_array.push_back(tmp_vec);

		cout << tmp_vec << endl;
	}

	vector<ublas::vector<double>> the_centroid_vec;
	vector<boost::unordered_set<UINT>> the_groups;

	k_mean_cluster(k, n, vec_dim, coord_array, the_groups, the_centroid_vec);

	vector<UINT> representatives;

	for(UINT i = 0; i<k; i++) //for each group
	{
		//each group
		double closest_distance_to_centroid = std::numeric_limits<double>::max();
		UINT pixel_id_closest_distance_to_centroid = 0;
		for(boost::unordered_set<UINT>::iterator it = the_groups[i].begin(); it!=the_groups[i].end(); ++it)
		{
			double distance = compute_distance(coord_array[*it], the_centroid_vec[i]);
			if(distance < closest_distance_to_centroid)
			{
				closest_distance_to_centroid = distance;
				pixel_id_closest_distance_to_centroid = *it;
			}
		}
		representatives.push_back(pixel_id_closest_distance_to_centroid);
	}

	return 0;
}
Example #9
inline
void
op_median::apply(Mat< std::complex<T> >& out, const Op<T1,op_median>& in)
  {
  arma_extra_debug_sigprint();
  
  typedef typename std::complex<T> eT;
  
  arma_type_check(( is_same_type<eT, typename T1::elem_type>::value == false ));
  
  const unwrap_check<T1> tmp(in.m, out);
  const Mat<eT>&     X = tmp.M;
  
  const uword X_n_rows = X.n_rows;
  const uword X_n_cols = X.n_cols;
  
  const uword dim = in.aux_uword_a;
  arma_debug_check( (dim > 1), "median(): incorrect usage. dim must be 0 or 1");
  
  if(dim == 0)  // in each column
    {
    arma_extra_debug_print("op_median::apply(), dim = 0");
    
    arma_debug_check( (X_n_rows == 0), "median(): given object has zero rows" );

    out.set_size(1, X_n_cols);
    
    std::vector< arma_cx_median_packet<T> > tmp_vec(X_n_rows);
    
    for(uword col=0; col<X_n_cols; ++col)
      {
      const eT* colmem = X.colptr(col);
      
      for(uword row=0; row<X_n_rows; ++row)
        {
        tmp_vec[row].val   = std::abs(colmem[row]);
        tmp_vec[row].index = row;
        }
      
      uword index1;
      uword index2;
      op_median::direct_cx_median_index(index1, index2, tmp_vec);
        
      out[col] = op_median::robust_mean(colmem[index1], colmem[index2]);
      }
    }
  else
  if(dim == 1)  // in each row
    {
    arma_extra_debug_print("op_median::apply(), dim = 1");
    
    arma_debug_check( (X_n_cols == 0), "median(): given object has zero columns" );

    out.set_size(X_n_rows, 1);
    
    std::vector< arma_cx_median_packet<T> > tmp_vec(X_n_cols);
    
    for(uword row=0; row<X_n_rows; ++row)
      {
      for(uword col=0; col<X_n_cols; ++col)
        {
        tmp_vec[col].val   = std::abs(X.at(row,col));
        tmp_vec[col].index = col;
        }
      
      uword index1;
      uword index2;
      op_median::direct_cx_median_index(index1, index2, tmp_vec);
      
      out[row] = op_median::robust_mean( X.at(row,index1), X.at(row,index2) );
      }
    }
  }
Example #10
inline
void
op_median::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_median>& in)
  {
  arma_extra_debug_sigprint();
  
  typedef typename T1::elem_type eT;
  
  const unwrap_check<T1> tmp(in.m, out);
  const Mat<eT>&     X = tmp.M;
  
  const uword X_n_rows = X.n_rows;
  const uword X_n_cols = X.n_cols;
  
  const uword dim = in.aux_uword_a;
  arma_debug_check( (dim > 1), "median(): incorrect usage. dim must be 0 or 1");
  
  if(dim == 0)  // in each column
    {
    arma_extra_debug_print("op_median::apply(), dim = 0");
    
    arma_debug_check( (X_n_rows == 0), "median(): given object has zero rows" );

    out.set_size(1, X_n_cols);
    
    std::vector<eT> tmp_vec(X_n_rows);
      
    for(uword col=0; col<X_n_cols; ++col)
      {
      const eT* colmem = X.colptr(col);
      
      for(uword row=0; row<X_n_rows; ++row)
        {
        tmp_vec[row] = colmem[row];
        }
      
      out[col] = op_median::direct_median(tmp_vec);
      }
    }
  else
  if(dim == 1)  // in each row
    {
    arma_extra_debug_print("op_median::apply(), dim = 1");
    
    arma_debug_check( (X_n_cols == 0), "median(): given object has zero columns" );

    out.set_size(X_n_rows, 1);
    
    std::vector<eT> tmp_vec(X_n_cols);
      
    for(uword row=0; row<X_n_rows; ++row)
      {
      for(uword col=0; col<X_n_cols; ++col)
        {
        tmp_vec[col] = X.at(row,col);
        }
      
      out[row] =  op_median::direct_median(tmp_vec);
      }
    }
  }
Example #11
inline
typename T1::elem_type
op_median::median_vec
  (
  const T1& X,
  const typename arma_cx_only<typename T1::elem_type>::result* junk
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk);
  
  typedef typename T1::elem_type eT;
  typedef typename T1::pod_type   T;
  
  const Proxy<T1> P(X);
  
  const uword n_elem = P.get_n_elem();
  
  if(n_elem == 0)
    {
    arma_debug_check(true, "median(): object has no elements");
    
    return Datum<eT>::nan;
    }
  
  std::vector< arma_cx_median_packet<T> > tmp_vec(n_elem);
  
  if(Proxy<T1>::prefer_at_accessor == false)
    {
    typedef typename Proxy<T1>::ea_type ea_type;
    
    ea_type A = P.get_ea();
    
    for(uword i=0; i<n_elem; ++i)
      {
      tmp_vec[i].val   = std::abs( A[i] );
      tmp_vec[i].index = i;
      }
    
    uword index1;
    uword index2;
    op_median::direct_cx_median_index(index1, index2, tmp_vec);
    
    return op_mean::robust_mean( A[index1], A[index2] );
    }
  else
    {
    const uword n_rows = P.get_n_rows();
    const uword n_cols = P.get_n_cols();
    
    if(n_cols == 1)
      {
      for(uword row=0; row < n_rows; ++row)
        {
        tmp_vec[row].val   = std::abs( P.at(row,0) );
        tmp_vec[row].index = row;
        }
      
      uword index1;
      uword index2;
      op_median::direct_cx_median_index(index1, index2, tmp_vec);
      
      return op_mean::robust_mean( P.at(index1,0), P.at(index2,0) );
      }
    else
    if(n_rows == 1)
      {
      for(uword col=0; col < n_cols; ++col)
        {
        tmp_vec[col].val   = std::abs( P.at(0,col) );
        tmp_vec[col].index = col;
        }
      
      uword index1;
      uword index2;
      op_median::direct_cx_median_index(index1, index2, tmp_vec);
      
      return op_mean::robust_mean( P.at(0,index1), P.at(0,index2) );
      }
    else
      {
      arma_stop("op_median::median_vec(): expected a vector" );
      
      return eT(0);
      }
    }
  }
Example #12
inline
typename T1::elem_type
op_median::median_vec
  (
  const T1& X,
  const typename arma_not_cx<typename T1::elem_type>::result* junk
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk);
  
  typedef typename T1::elem_type eT;
  
  typedef typename Proxy<T1>::stored_type P_stored_type;
    
  const Proxy<T1> P(X);
  
  const uword n_elem = P.get_n_elem();
  
  if(n_elem == 0)
    {
    arma_debug_check(true, "median(): object has no elements");
    
    return Datum<eT>::nan;
    }
  
  std::vector<eT> tmp_vec(n_elem);
  
  if(is_Mat<P_stored_type>::value == true)
    {
    const unwrap<P_stored_type> tmp(P.Q);
    
    const typename unwrap<P_stored_type>::stored_type& Y = tmp.M;
    
    arrayops::copy( &(tmp_vec[0]), Y.memptr(), n_elem );
    }
  else
    {
    if(Proxy<T1>::prefer_at_accessor == false)
      {
      typedef typename Proxy<T1>::ea_type ea_type;
      
      ea_type A = P.get_ea();
      
      for(uword i=0; i<n_elem; ++i)  { tmp_vec[i] = A[i]; }
      }
    else
      {
      const uword n_rows = P.get_n_rows();
      const uword n_cols = P.get_n_cols();
      
      if(n_cols == 1)
        {
        for(uword row=0; row < n_rows; ++row)  { tmp_vec[row] = P.at(row,0); }
        }
      else
      if(n_rows == 1)
        {
        for(uword col=0; col < n_cols; ++col)  { tmp_vec[col] = P.at(0,col); }
        }
      else
        {
        arma_stop("op_median::median_vec(): expected a vector" );
        }
      }
    }
  
  return op_median::direct_median(tmp_vec);
  }
Example #13
inline
void
op_median::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_median>& in)
  {
  arma_extra_debug_sigprint();
  
  typedef typename T1::elem_type eT;
  
  const uword dim = in.aux_uword_a;
  arma_debug_check( (dim > 1), "median(): parameter 'dim' must be 0 or 1" );
  
  const Proxy<T1> P(in.m);
  
  typedef typename Proxy<T1>::stored_type P_stored_type;
  
  const bool is_alias = P.is_alias(out);
  
  if( (is_Mat<P_stored_type>::value == true) || is_alias )
    {
    const unwrap_check<P_stored_type> tmp(P.Q, is_alias);
    
    const typename unwrap_check<P_stored_type>::stored_type& X = tmp.M;
    
    const uword X_n_rows = X.n_rows;
    const uword X_n_cols = X.n_cols;
    
    if(dim == 0)  // in each column
      {
      arma_extra_debug_print("op_median::apply(): dim = 0");
      
      out.set_size((X_n_rows > 0) ? 1 : 0, X_n_cols);
      
      if(X_n_rows > 0)
        {
        std::vector<eT> tmp_vec(X_n_rows);
        
        for(uword col=0; col < X_n_cols; ++col)
          {
          arrayops::copy( &(tmp_vec[0]), X.colptr(col), X_n_rows );
          
          out[col] = op_median::direct_median(tmp_vec);
          }
        }
      }
    else  // in each row
      {
      arma_extra_debug_print("op_median::apply(): dim = 1");
      
      out.set_size(X_n_rows, (X_n_cols > 0) ? 1 : 0);
      
      if(X_n_cols > 0)
        {
        std::vector<eT> tmp_vec(X_n_cols);
          
        for(uword row=0; row < X_n_rows; ++row)
          {
          for(uword col=0; col < X_n_cols; ++col)  { tmp_vec[col] = X.at(row,col); }
          
          out[row] = op_median::direct_median(tmp_vec);
          }
        }
      }
    }
  else
    {
    const uword P_n_rows = P.get_n_rows();
    const uword P_n_cols = P.get_n_cols();
    
    if(dim == 0)  // in each column
      {
      arma_extra_debug_print("op_median::apply(): dim = 0");
      
      out.set_size((P_n_rows > 0) ? 1 : 0, P_n_cols);
      
      if(P_n_rows > 0)
        {
        std::vector<eT> tmp_vec(P_n_rows);
        
        for(uword col=0; col < P_n_cols; ++col)
          {
          for(uword row=0; row < P_n_rows; ++row)  { tmp_vec[row] = P.at(row,col); }
          
          out[col] = op_median::direct_median(tmp_vec);
          }
        }
      }
    else  // in each row
      {
      arma_extra_debug_print("op_median::apply(): dim = 1");
      
      out.set_size(P_n_rows, (P_n_cols > 0) ? 1 : 0);
      
      if(P_n_cols > 0)
        {
        std::vector<eT> tmp_vec(P_n_cols);
          
        for(uword row=0; row < P_n_rows; ++row)
          {
          for(uword col=0; col < P_n_cols; ++col)  { tmp_vec[col] = P.at(row,col); }
          
          out[row] = op_median::direct_median(tmp_vec);
          }
        }
      }
    }
  }
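These op_median routines back Armadillo's user-facing median() function; a typical caller never touches them directly. A minimal usage sketch (assuming Armadillo is installed and linked):

#include <armadillo>
#include <iostream>

int main() {
    arma::mat A(5, 4, arma::fill::randu);          // random 5x4 matrix
    arma::rowvec col_medians = arma::median(A, 0); // median of each column (dim = 0)
    arma::vec    row_medians = arma::median(A, 1); // median of each row    (dim = 1)

    arma::vec v(7, arma::fill::randu);
    double m = arma::median(v);                    // scalar median of a vector

    col_medians.print("column medians:");
    row_medians.print("row medians:");
    std::cout << "vector median: " << m << '\n';
    return 0;
}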
Example #14
/*
 * Fits a weighted cubic regression on predictor(s)
 *
 * @param contrast - want to predict this value per snp
 * @param strength - covariate of choice
 * @param weights - weight of data points for this genotype
 * @param Predictor - output, prediction function coefficients
 * @param Predicted - output, predicted contrast per snp
 */
void
FitWeightedCubic(const std::vector<double> &contrast,
                 const std::vector<double> &strength,
                 const std::vector<double> &weights,
                 std::vector<double> &Predictor,
                 std::vector<double> &Predicted) {

  	// Singular value decomposition method
	unsigned int i;
	unsigned int nobs;
	unsigned int npred;
	npred = 3+1;
	nobs= contrast.size();

	// convert the double inputs into newmat's Real type
  vector<Real> tmp_vec(nobs);
  Real* tmp_ptr = &tmp_vec[0];
	vector<Real> obs_vec(nobs);
	Real *obs_ptr = &obs_vec[0];
	vector<Real> weight_vec(nobs);

  Matrix covarMat(nobs,npred);
	ColumnVector observedVec(nobs);

	// fill in the data
	// modified by weights
	for (i=0; i<nobs; i++)
		weight_vec[i] = sqrt(weights[i]);

  	// load data - 1s into col 1 of matrix
	for (i=0; i<nobs; i++)
		tmp_vec[i] = weight_vec[i];
  	covarMat.Column(1) << tmp_ptr;
	for (i=0; i<nobs; i++)
		tmp_vec[i] *= strength[i];
  	covarMat.Column(2) << tmp_ptr;
	for (i=0; i<nobs; i++)
		tmp_vec[i] *= strength[i];
  	covarMat.Column(3) << tmp_ptr;
	for (i=0; i<nobs; i++)
		tmp_vec[i] *= strength[i];
  	covarMat.Column(4) << tmp_ptr;

  	for (i=0; i<nobs; i++)
		obs_vec[i] = contrast[i]*weight_vec[i];
  	observedVec << obs_ptr;

  	// do SVD
  	Matrix U, V;
  	DiagonalMatrix D;
    ColumnVector Fitted(nobs);
    ColumnVector A(npred);

  	SVD(covarMat,D,U,V);

  	Fitted = U.t() * observedVec;
  	A = V * ( D.i() * Fitted );

	// this predicts "0" for low weights
	// because of weighted regression
  	Fitted = U * Fitted;

	// this is the predictor
	Predictor.resize(npred);
	for (i=0; i<npred; i++)
		Predictor[i] = A.element(i);


  // export data back to doubles
	// the weighted fit alone would predict "0" for low-weighted points,
	// which is not the desired outcome, so instead we predict all points
	// at once, unweighted, as output
	vector<double> Goofy;
	Predicted.resize(nobs);
  	for (i = 0; i < nobs; ++i) {
      Goofy.resize(npred);
      Goofy[0] = 1;
      Goofy[1] = strength[i];
      Goofy[2] = strength[i]*Goofy[1];
      Goofy[3] = strength[i]*Goofy[2];
      Predicted[i] = vprod(Goofy,Predictor);
  	}
}
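A minimal calling sketch for FitWeightedCubic (assuming the newmat-backed implementation above is compiled and linked; the data values here are made up for illustration):

#include <vector>

int main() {
	// made-up observations: fit contrast as a weighted cubic in strength
	std::vector<double> contrast = {0.2, 0.5, 0.9, 1.4, 2.1};
	std::vector<double> strength = {1.0, 2.0, 3.0, 4.0, 5.0};
	std::vector<double> weights  = {1.0, 1.0, 0.5, 1.0, 1.0};

	std::vector<double> predictor;   // output: 4 polynomial coefficients
	std::vector<double> predicted;   // output: unweighted prediction per observation

	FitWeightedCubic(contrast, strength, weights, predictor, predicted);
	return 0;
}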
Example #15
int cPCA2::computePC(std::vector<float>& x,
                     size_t nrow,
                     size_t ncol,
                     bool is_center,
                     bool is_scale,
                     bool is_corr)
{
    _ncol     = ncol;
    _nrow     = nrow;
    _is_center = is_center;
    _is_scale  = is_scale;
    _is_corr   = is_corr;

    if (x.size() != _nrow*_ncol)     { return -1; }
    if ((1 == _ncol) || (1 == nrow)) { return -1; }

    // convert vector to Eigen 2-dimensional matrix
    _xXf.resize(_nrow, _ncol);

    for (size_t i = 0; i < _nrow; ++i) {
        for (size_t j = 0; j < _ncol; ++j) {
            _xXf(i, j) = x[j + i*_ncol];
        }
    }

    // mean and standard deviation for each column
    Eigen::VectorXf     mean_vector(_ncol),
                        sd_vector(_ncol);
    size_t              zero_sd_num = 0;
    float               denom = static_cast<float>((_nrow > 1) ? _nrow - 1 : 1);

    mean_vector = _xXf.colwise().mean();

    Eigen::VectorXf     curr_col;

    for (size_t i = 0; i < _ncol; ++i) {
        curr_col = Eigen::VectorXf::Constant(_nrow, mean_vector(i));    // mean(x) for column x
        curr_col = _xXf.col(i) - curr_col;                              // x - mean(x)
        curr_col = curr_col.array().square();                           // (x-mean(x))^2

        sd_vector(i) = std::sqrt((curr_col.sum())/denom);

        if (0 == sd_vector(i)) {
            zero_sd_num++;
        }
    }

    // if too many columns have sd == 0, don't continue the calculation
    if (1 > _ncol-zero_sd_num) {
        return -1;
    }

    // delete columns with sd == 0
    Eigen::MatrixXf     tmp(_nrow, _ncol-zero_sd_num);
    Eigen::VectorXf     tmp_mean_vector(_ncol-zero_sd_num);

    size_t              curr_col_num = 0;

    for (size_t i = 0; i < _ncol; ++i) {
        if (0 != sd_vector(i)) {
            tmp.col(curr_col_num) = _xXf.col(i);
            tmp_mean_vector(curr_col_num) = mean_vector(i);
            curr_col_num++;
        }
        else {
            _eliminated_columns.push_back(i);
        }
    }

    _ncol      -= zero_sd_num;
    _xXf        = tmp;
    mean_vector = tmp_mean_vector;
    tmp.resize(0, 0);
    tmp_mean_vector.resize(0);

    // shift to zero
    if (true == _is_center) {
        for (size_t i = 0; i < _ncol; ++i) {
            _xXf.col(i) -= Eigen::VectorXf::Constant(_nrow, mean_vector(i));
        }
    }

    // scale to unit variance
    if ( (false == _is_corr) || (true == _is_scale)) {
        for (size_t i = 0; i < _ncol; ++i) {
            _xXf.col(i) /= std::sqrt(_xXf.col(i).array().square().sum()/denom);
        }
    }

#ifndef NDEBUG
    std::cout << "\nScaled matrix:\n";
    std::cout << _xXf << std::endl;
    std::cout << "\nMean before scaling:\n" << mean_vector.transpose();
    std::cout << "\nStandard deviation before scaling:\n" << sd_vector.transpose();
#endif

    // when _nrow < _ncol, SVD will be used
    // if corr is true and _nrow > _ncol, the correlation matrix will be used
    // (TODO): What about covariance?
    if ((_nrow < _ncol) || (false == _is_corr)) {
        _method = "svd";

        Eigen::JacobiSVD<Eigen::MatrixXf>   svd(_xXf, Eigen::ComputeThinV);

        Eigen::VectorXf     eigen_singular_values = svd.singularValues();
        Eigen::VectorXf     tmp_vec = eigen_singular_values.array().square();
        float               tmp_sum = tmp_vec.sum();
        size_t              lim = (_nrow < _ncol)? _nrow : _ncol;

        tmp_vec /= tmp_sum;

        // PC's standard deviation and
        // PC's proportion of variance
        _kaiser = 0;
        for (size_t i = 0; i < lim; ++i) {
            _sd.push_back(eigen_singular_values(i)/std::sqrt(denom));

            if (_sd[i] >= 1) {
                _kaiser = (unsigned int) i + 1;
            }

            _prop_of_var.push_back(tmp_vec(i));
        }

        tmp_vec.resize(0);

#ifndef NDEBUG
        std::cout << "\n\nStandard deviations for PCs:\n";
        copy(_sd.begin(), _sd.end(),std::ostream_iterator<float>(std::cout," "));
        std::cout << "\n\nKaiser criterion: PC #" << _kaiser << std::endl;
#endif

        // PC's cumulative proportion
        _thresh95 = 1;
        _cum_prop.push_back(_prop_of_var[0]);

        for (size_t i = 1; i < _prop_of_var.size(); ++i) {
            _cum_prop.push_back(_cum_prop[i-1]+_prop_of_var[i]);

            if (_cum_prop[i] < 0.95) {
                _thresh95 = (unsigned int) i + 1;
            }
        }

#ifndef NDEBUG
        std::cout << "\nCumulative proportion:\n";
        copy(_cum_prop.begin(), _cum_prop.end(),std::ostream_iterator<float>(std::cout," "));
        std::cout << "\n\nThresh95 criterion: PC #" << _thresh95 << std::endl;
#endif

        // scores
        Eigen::MatrixXf     eigen_scores = _xXf * svd.matrixV();

#ifndef NDEBUG
        std::cout << "\n\nRotated values (scores):\n" << eigen_scores;
#endif

        _scores.reserve(lim*lim);

        for (size_t i = 0; i < lim; ++i) {
            for (size_t j = 0; j < lim; ++j) {
                _scores.push_back(eigen_scores(i, j));
            }
        }

        eigen_scores.resize(0, 0);

#ifndef NDEBUG
        std::cout << "\n\nScores in vector:\n";
        copy(_scores.begin(), _scores.end(),std::ostream_iterator<float>(std::cout," "));
        std::cout << "\n";
#endif

    }
    else {    // COR OR COV MATRICES ARE HERE
        _method = "cor";

        // calculate covariance matrix
        Eigen::MatrixXf     eigen_cov; // = MatrixXf::Zero(_ncol, _ncol);
        Eigen::VectorXf     sds;

        // (TODO) should be weighted cov matrix, even if is_center == false
        eigen_cov = (1.0f /((float) _nrow/*-1*/)) * _xXf.transpose() * _xXf;
        sds = eigen_cov.diagonal().array().sqrt();
        Eigen::MatrixXf outer_sds = sds * sds.transpose();
        eigen_cov = eigen_cov.array() / outer_sds.array();
        outer_sds.resize(0, 0);

        // if the data matrix is scaled, the covariance matrix equals the correlation matrix
        Eigen::EigenSolver<Eigen::MatrixXf>     edc(eigen_cov);
        Eigen::VectorXf                         eigen_eigenvalues = edc.eigenvalues().real();
        Eigen::MatrixXf                         eigen_eigenvectors = edc.eigenvectors().real();

#ifndef NDEBUG
        std::cout << eigen_cov << std::endl;
        std::cout << std::endl << eigen_eigenvalues.transpose() << std::endl;
        std::cout << std::endl << eigen_eigenvectors << std::endl;
#endif

        // the eigenvalues and eigenvectors are not sorted
        // so, we should sort them
        typedef std::pair<float,int>    eigen_pair;
        std::vector<eigen_pair>         ep;

        for (size_t i = 0 ; i < _ncol; ++i) {
            ep.push_back(std::make_pair(eigen_eigenvalues(i), i));
        }

        sort(ep.begin(), ep.end());     // ascending order by default

        // sort them all in descending order
        Eigen::MatrixXf     eigen_eigenvectors_sorted = Eigen::MatrixXf::Zero(eigen_eigenvectors.rows(), eigen_eigenvectors.cols());
        Eigen::VectorXf     eigen_eigenvalues_sorted  = Eigen::VectorXf::Zero(_ncol);
        int                 colnum = 0;

        for (int i = (int) ep.size()-1; i > -1; i--) {
            eigen_eigenvalues_sorted(colnum)         = ep[i].first;
            eigen_eigenvectors_sorted.col(colnum++) += eigen_eigenvectors.col(ep[i].second);
        }

#ifndef NDEBUG
        std::cout << std::endl << eigen_eigenvalues_sorted.transpose() << std::endl;
        std::cout << std::endl << eigen_eigenvectors_sorted << std::endl;
#endif

        // we don't need the unsorted arrays anymore
        eigen_eigenvalues.resize(0);
        eigen_eigenvectors.resize(0, 0);

        _sd.clear();
        _prop_of_var.clear();
        _kaiser = 0;

        float       tmp_sum = eigen_eigenvalues_sorted.sum();

        for (size_t i = 0; i < _ncol; ++i) {
            _sd.push_back(std::sqrt(eigen_eigenvalues_sorted(i)));

            if (_sd[i] >= 1) {
                _kaiser = (unsigned int) i + 1;
            }

            _prop_of_var.push_back(eigen_eigenvalues_sorted(i)/tmp_sum);
        }

#ifndef NDEBUG
        std::cout << "\nStandard deviations for PCs:\n";
        copy(_sd.begin(), _sd.end(), std::ostream_iterator<float>(std::cout," "));
        std::cout << "\nProportion of variance:\n";
        copy(_prop_of_var.begin(), _prop_of_var.end(), std::ostream_iterator<float>(std::cout," "));
        std::cout << "\nKaiser criterion: PC #" << _kaiser << std::endl;
#endif

        // PC's cumulative proportion
        _cum_prop.clear();
        _thresh95 = 1;
        _cum_prop.push_back(_prop_of_var[0]);

        for (size_t i = 1; i < _prop_of_var.size(); ++i) {
            _cum_prop.push_back(_cum_prop[i-1]+_prop_of_var[i]);

            if (_cum_prop[i] < 0.95) {
                _thresh95 = (unsigned int) i + 1;
            }
        }

#ifndef NDEBUG
        std::cout << "\n\nCumulative proportions:\n";
        copy(_cum_prop.begin(), _cum_prop.end(), std::ostream_iterator<float>(std::cout," "));
        std::cout << "\n\n95% threshold: PC #" << _thresh95 << std::endl;
#endif

        // scores for PCA with correlation matrix
        // scale before calculating new values

        for (size_t i = 0; i < _ncol; ++i) {
            _xXf.col(i) /= sds(i);
        }

        sds.resize(0);
        Eigen::MatrixXf     eigen_scores = _xXf * eigen_eigenvectors_sorted;

#ifndef NDEBUG
        std::cout << "\n\nRotated values (scores):\n" << eigen_scores;
#endif

        _scores.clear();
        _scores.reserve(_ncol*_nrow);

        for (size_t i = 0; i < _nrow; ++i) {
            for (size_t j = 0; j < _ncol; ++j) {
                _scores.push_back(eigen_scores(i, j));
            }
        }

        eigen_scores.resize(0, 0);

#ifndef NDEBUG
        std::cout << "\n\nScores in vector:\n";
        copy(_scores.begin(), _scores.end(), std::ostream_iterator<float>(std::cout," "));
        std::cout << "\n";
#endif
    }

    return 0;
}
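A minimal calling sketch for computePC (assuming cPCA2 is default-constructible; the data values are made up and stored row-major, as the method expects):

#include <cstddef>
#include <vector>

int main() {
    // 4 observations x 3 variables, row-major
    const size_t nrow = 4, ncol = 3;
    std::vector<float> x = {
        1.0f, 2.0f,  3.0f,
        2.0f, 4.0f,  6.5f,
        3.0f, 6.0f,  8.0f,
        4.0f, 8.0f, 11.0f
    };

    cPCA2 pca;   // assumes a default constructor
    int rc = pca.computePC(x, nrow, ncol,
                           /*is_center=*/true,
                           /*is_scale=*/true,
                           /*is_corr=*/false);
    return (rc == 0) ? 0 : 1;   // computePC returns 0 on success, -1 on bad input
}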
Example #16
void
TempoTrackV2::viterbi_decode(const d_mat_t &rcfmat, const d_vec_t &wv, d_vec_t &beat_period, d_vec_t &tempi)
{
    // following Kevin Murphy's Viterbi decoding to get the best path of
    // beat periods through rcfmat

    // make transition matrix
    d_mat_t tmat;
    for (unsigned int i=0;i<wv.size();i++)
    {
        tmat.push_back ( d_vec_t() ); // adds a new column
        for (unsigned int j=0; j<wv.size(); j++)
        {
            tmat[i].push_back(0.); // fill with zeros initially
        }
    }

    // variance of Gaussians in transition matrix
    // formed of Gaussians on diagonal - implies slow tempo change
    double sigma = 8.;
    // don't want really short beat periods, or really long ones
    for (unsigned int i=20;i <wv.size()-20; i++)
    {
        for (unsigned int j=20; j<wv.size()-20; j++)
        {
            double mu = static_cast<double>(i);
            tmat[i][j] = exp( (-1.*pow((j-mu),2.)) / (2.*pow(sigma,2.)) );
        }
    }

    // parameters for Viterbi decoding... this part is taken from
    // Murphy's matlab

    d_mat_t delta;
    i_mat_t psi;
    for (unsigned int i=0;i <rcfmat.size(); i++)
    {
        delta.push_back( d_vec_t());
        psi.push_back( i_vec_t());
        for (unsigned int j=0; j<rcfmat[i].size(); j++)
        {
            delta[i].push_back(0.); // fill with zeros initially
            psi[i].push_back(0); // fill with zeros initially
        }
    }


    unsigned int T = delta.size();

    if (T < 2) return; // can't do anything at all meaningful

    unsigned int Q = delta[0].size();

    // initialize first column of delta
    for (unsigned int j=0; j<Q; j++)
    {
        delta[0][j] = wv[j] * rcfmat[0][j];
        psi[0][j] = 0;
    }

    double deltasum = 0.;
    for (unsigned int i=0; i<Q; i++)
    {
        deltasum += delta[0][i];
    }
    for (unsigned int i=0; i<Q; i++)
    {
        delta[0][i] /= (deltasum + EPS);
    }


    for (unsigned int t=1; t<T; t++)
    {
        d_vec_t tmp_vec(Q);

        for (unsigned int j=0; j<Q; j++)
        {
            for (unsigned int i=0; i<Q; i++)
            {
                tmp_vec[i] = delta[t-1][i] * tmat[j][i];
            }

            delta[t][j] = get_max_val(tmp_vec);

            psi[t][j] = get_max_ind(tmp_vec);

            delta[t][j] *= rcfmat[t][j];
        }

        // normalise current delta column
        double deltasum = 0.;
        for (unsigned int i=0; i<Q; i++)
        {
            deltasum += delta[t][i];
        }
        for (unsigned int i=0; i<Q; i++)
        {
            delta[t][i] /= (deltasum + EPS);
        }
    }

    i_vec_t bestpath(T);
    d_vec_t tmp_vec(Q);
    for (unsigned int i=0; i<Q; i++)
    {
        tmp_vec[i] = delta[T-1][i];
    }

    // find starting point - best beat period for "last" frame
    bestpath[T-1] = get_max_ind(tmp_vec);

    // backtrace through index of maximum values in psi
    for (unsigned int t=T-2; t>0 ;t--)
    {
        bestpath[t] = psi[t+1][bestpath[t+1]];
    }

    // weird but necessary hack -- couldn't get above loop to terminate at t >= 0
    bestpath[0] = psi[1][bestpath[1]];

    unsigned int lastind = 0;
    for (unsigned int i=0; i<T; i++)
    {
        unsigned int step = 128;
        for (unsigned int j=0; j<step; j++)
        {
            lastind = i*step+j;
            beat_period[lastind] = bestpath[i];
        }
//        std::cerr << "bestpath[" << i << "] = " << bestpath[i] << " (used for beat_periods " << i*step << " to " << i*step+step-1 << ")" << std::endl;
    }

    //fill in the last values...
    for (unsigned int i=lastind; i<beat_period.size(); i++)
    {
        beat_period[i] = beat_period[lastind];
    }

    for (unsigned int i = 0; i < beat_period.size(); i++)
    {
        tempi.push_back((60. * m_rate / m_increment)/beat_period[i]);
    }
}
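The core recurrence inside the time loop above, sketched as a standalone single Viterbi step over plain std::vector (independent of the TempoTrackV2 types; obs[j] plays the role of rcfmat[t][j]):

#include <cstddef>
#include <vector>

// One Viterbi step: for each state j, pick the best predecessor i under the
// transition weights, then scale by the current observation likelihood.
void viterbi_step(const std::vector<double>& delta_prev,
                  const std::vector<std::vector<double> >& tmat,
                  const std::vector<double>& obs,
                  std::vector<double>& delta_cur,
                  std::vector<std::size_t>& psi_cur)
{
    const std::size_t Q = delta_prev.size();
    delta_cur.assign(Q, 0.0);
    psi_cur.assign(Q, 0);
    for (std::size_t j = 0; j < Q; j++)
    {
        double best = 0.0;
        std::size_t best_i = 0;
        for (std::size_t i = 0; i < Q; i++)
        {
            const double v = delta_prev[i] * tmat[j][i];
            if (v > best) { best = v; best_i = i; }
        }
        delta_cur[j] = best * obs[j];   // delta[t][j] = max_i(delta[t-1][i] * tmat[j][i]) * rcfmat[t][j]
        psi_cur[j]   = best_i;          // psi[t][j] records the argmax predecessor
    }
}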