Code example #1
File: GridDomain.cpp  Project: ckhroulev/glint2
void GridDomain::global_to_local(
	blitz::Array<double,1> const &global,
	std::vector<blitz::Array<double,1>> &olocal)
{
	if (olocal.size() != this->num_local_indices) {
		fprintf(stderr, "MatrixDomainer::get_rows() had bad dimension 1 = %d (expected %ld)\n", olocal.extent(1), this->num_local_indices);
		throw std::exception();
	}

	for (auto ii = olocal.begin(); ii != olocal.end(); ++ii) {
		// Make sure each local array has the right length
		if (ii->extent(0) != global.extent(0)) {
			fprintf(stderr, "GridDomain::global_to_local() had bad dimension 0 = %d (expected %d)\n", ii->extent(0), global.extent(0));
			throw std::exception();
		}
	}

	// Copy out data, translating to local coordinates
	for (int j=0; j < global.extent(0); ++j) {
		int lindex[this->num_local_indices];
		this->global_to_local(global(j), lindex);
		for (int i=0; i<this->num_local_indices; ++i)
			olocal[i](j) = lindex[i];
	}
}
Code example #2
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
double bob::learn::em::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
  blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
{
  // Check dimension
  bob::core::array::assertSameDimensionLength(log_weighted_gaussian_likelihoods.extent(0), m_n_gaussians);
  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
  return logLikelihood_(x,log_weighted_gaussian_likelihoods);
}
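
A minimal usage sketch for the overload above. The GMMMachine(n_gaussians, n_inputs) constructor and the header path are assumptions about bob.learn.em, not confirmed by this listing:

#include <blitz/array.h>
#include <bob.learn.em/GMMMachine.h> // assumed header path

int main() {
  // assumed constructor: 2 Gaussians over 3-dimensional inputs
  bob::learn::em::GMMMachine gmm(2, 3);
  blitz::Array<double,1> x(3);
  x = 0.5, 1.0, -0.5;
  // one log-weighted likelihood per Gaussian comes back in this array
  blitz::Array<double,1> per_gaussian(2);
  double ll = gmm.logLikelihood(x, per_gaussian);
  (void)ll;
  return 0;
}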
Code example #3
File: machine.cpp  Project: 183amir/bob.learn.linear
  void Machine::setInputDivision (const blitz::Array<double,1>& v) {

    if (m_weight.extent(0) != v.extent(0)) {
      boost::format m("mismatch on the input division shape: expected a vector of size %d, but you input one with size = %d instead");
      m % m_weight.extent(0) % v.extent(0);
      throw std::runtime_error(m.str());
    }
    m_input_div.reference(bob::core::array::ccopy(v));

  }
Code example #4
File: Function.cpp  Project: 183amir/bob.extension
blitz::Array<double,1> bob::example::library::reverse (const blitz::Array<double,1>& array){
  // create new array in the desired shape
  blitz::Array<double,1> retval(array.shape());
  // copy data
  for (int i = 0, j = array.extent(0)-1; i < array.extent(0); ++i, --j){
    retval(j) = array(i);
  }
  // return the copied data
  return retval;
}
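
A minimal usage sketch for reverse(); only blitz++ and the declaration above are assumed:

#include <blitz/array.h>

namespace bob { namespace example { namespace library {
  blitz::Array<double,1> reverse(const blitz::Array<double,1>& array);
}}}

int main() {
  blitz::Array<double,1> a(5);
  a = 1., 2., 3., 4., 5.;   // blitz comma-list initialization
  blitz::Array<double,1> r = bob::example::library::reverse(a);
  // r now holds 5, 4, 3, 2, 1
  return 0;
}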
Code example #5
File: machine.cpp  Project: 183amir/bob.learn.linear
  void Machine::setBiases (const blitz::Array<double,1>& bias) {

    if (m_weight.extent(1) != bias.extent(0)) {
      boost::format m("mismatch on the bias shape: expected a vector of size %d, but you input one with size = %d instead");
      m % m_weight.extent(1) % bias.extent(0);
      throw std::runtime_error(m.str());
    }
    m_bias.reference(bob::core::array::ccopy(bias));

  }
Code example #6
File: machine.cpp  Project: 183amir/bob.learn.linear
 Machine::Machine(const blitz::Array<double,2>& weight)
   : m_input_sub(weight.extent(0)),
   m_input_div(weight.extent(0)),
   m_bias(weight.extent(1)),
   m_activation(boost::make_shared<bob::learn::activation::IdentityActivation>()),
   m_buffer(weight.extent(0))
 {
   m_input_sub = 0.0;
   m_input_div = 1.0;
   m_bias = 0.0;
   m_weight.reference(bob::core::array::ccopy(weight));
 }
Code example #7
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
double bob::learn::em::GMMMachine::logLikelihood(const blitz::Array<double, 2> &x) const {
  // Check dimension
  bob::core::array::assertSameDimensionLength(x.extent(1), m_n_inputs);
  // Call the 1-D logLikelihood_ overload on each sample and average
  double sum_ll = 0;
  for (int i=0; i<x.extent(0); i++)
    sum_ll+= logLikelihood_(x(i,blitz::Range::all()));

  return sum_ll / x.extent(0);
}
Code example #8
File: blitz.hpp  Project: seifer08ms/icebin
template<class T, int rank>
void check_dimensions(
    std::string const &vname,
    blitz::Array<T, rank> const &arr,
    std::vector<int> const &dims)
{
    for (int i=0; i<rank; ++i) {
        if (dims[i] >= 0 && arr.extent(i) != dims[i]) {
            fprintf(stderr,
                    "Error in %s: expected dimension #%d = %d (is %d instead)\n",
                    vname.c_str(), i, dims[i], arr.extent(i));
            throw std::exception();
        }
    }
}
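
A usage sketch for check_dimensions(); note that a negative entry in dims skips the check for that dimension:

#include <blitz/array.h>
#include <vector>
// assumes check_dimensions<T,rank>() from blitz.hpp above is in scope

int main() {
    blitz::Array<double,2> arr(3, 4);
    // -1 means "do not check this dimension"
    check_dimensions("arr", arr, std::vector<int>({3, -1}));   // passes
    // check_dimensions("arr", arr, std::vector<int>({3, 5})); // would print an error and throw
    return 0;
}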
Code example #9
File: machine.cpp  Project: 183amir/bob.learn.linear
  void Machine::forward (const blitz::Array<double,1>& input, blitz::Array<double,1>& output) const {

    if (m_weight.extent(0) != input.extent(0)) { //checks input dimension
      boost::format m("mismatch on the input dimension: expected a vector of size %d, but you input one with size = %d instead");
      m % m_weight.extent(0) % input.extent(0);
      throw std::runtime_error(m.str());
    }
    if (m_weight.extent(1) != output.extent(0)) { //checks output dimension
      boost::format m("mismatch on the output dimension: expected a vector of size %d, but you input one with size = %d instead");
      m % m_weight.extent(1) % output.extent(0);
      throw std::runtime_error(m.str());
    }
    forward_(input, output);

  }
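
A minimal sketch tying the constructor from example #6 to forward(). The bob::learn::linear namespace and the header path are assumptions, as is the exact forward formula in the comment:

#include <blitz/array.h>
#include <bob.learn.linear/machine.h> // assumed header path

int main() {
  blitz::Array<double,2> w(3, 2);   // 3 inputs, 2 outputs
  w = 1., 0.,
      0., 1.,
      1., 1.;
  bob::learn::linear::Machine machine(w);
  blitz::Array<double,1> in(3), out(2);
  in = 0.5, 1.0, -0.5;
  // presumably applies the (identity) activation to w^T * ((in - sub) / div) + bias
  machine.forward(in, out);
  return 0;
}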
Code example #10
File: machine.cpp  Project: 183amir/bob.learn.linear
  void Machine::setWeights (const blitz::Array<double,2>& weight) {

    if (weight.extent(0) != m_input_sub.extent(0)) { //checks 1st dimension
      boost::format m("mismatch on the weight shape (number of rows): expected a weight matrix with %d row(s), but you input one with %d row(s) instead");
      m % m_input_sub.extent(0) % weight.extent(0);
      throw std::runtime_error(m.str());
    }
    if (weight.extent(1) != m_bias.extent(0)) { //checks 2nd dimension
      boost::format m("mismatch on the weight shape (number of columns): expected a weight matrix with %d column(s), but you input one with %d column(s) instead");
      m % m_bias.extent(0) % weight.extent(1);
      throw std::runtime_error(m.str());
    }
    m_weight.reference(bob::core::array::ccopy(weight));

  }
Code example #11
void bob::learn::boosting::LUTTrainer::weightedHistogram(const blitz::Array<uint16_t,1>& features, const blitz::Array<double,1>& weights) const{
  bob::core::array::assertSameShape(features, weights);
  _gradientHistogram = 0.;
  for (int i = features.extent(0); i--;){
    _gradientHistogram((int)features(i)) += weights(i);
  }
}
Code example #12
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
double bob::learn::em::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x) const {
  // Check dimension
  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
  // Call the other logLikelihood_ (overloaded) function
  // (log_weighted_gaussian_likelihoods will be discarded)
  return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
}
Code example #13
double bob::math::det_(const blitz::Array<double,2>& A)
{
  // Size variable
  int N = A.extent(0);

  // Perform an LU decomposition
  blitz::Array<double,2> L(N,N);
  blitz::Array<double,2> U(N,N);
  blitz::Array<double,2> P(N,N);
  math::lu(A, L, U, P);

  // Compute the determinant of A = det(P*L)*PI(diag(U))
  //  where det(P*L) = +- 1 (Number of permutation in P)
  //  and PI(diag(U)) is the product of the diagonal elements of U
  blitz::Array<double,2> Lperm(N,N);
  math::prod(P,L,Lperm);
  int s = 1;
  double Udiag=1.;
  for (int i=0; i<N; ++i)
  {
    for (int j=i+1; j<N; ++j)
      if (P(i,j) > 0)
      {
        s = -s;
        break;
      }
    Udiag *= U(i,i);
  }

  return s*Udiag;
}
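
The inner loop above infers the sign of the permutation encoded in P. The same parity can be computed from cycle lengths; here is a hypothetical standalone helper (not part of bob.math) illustrating the idea:

#include <cstddef>
#include <vector>

// Hypothetical helper: parity of a zero-based index permutation.
// Each cycle of length k contributes k-1 transpositions, so an
// even-length cycle flips the sign.
int permutation_sign(const std::vector<int>& perm) {
  int sign = 1;
  std::vector<bool> seen(perm.size(), false);
  for (std::size_t i = 0; i < perm.size(); ++i) {
    if (seen[i]) continue;
    std::size_t j = i, len = 0;
    while (!seen[j]) { seen[j] = true; j = perm[j]; ++len; }
    if (len % 2 == 0) sign = -sign;
  }
  return sign;
}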
Code example #14
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
void bob::learn::em::GMMMachine::setVarianceThresholds(const blitz::Array<double, 2>& variance_thresholds) {
  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
  for(size_t i=0; i<m_n_gaussians; ++i)
    m_gaussians[i]->setVarianceThresholds(variance_thresholds(i,blitz::Range::all()));
  m_cache_supervector = false;
}
Code example #15
double bob::math::slogdet_(const blitz::Array<double,2>& A, int& sign)
{
  // Size variable
  int N = A.extent(0);

  // Perform an LU decomposition
  blitz::Array<double,2> L(N,N);
  blitz::Array<double,2> U(N,N);
  blitz::Array<double,2> P(N,N);
  math::lu(A, L, U, P);

  // Compute the determinant of A = det(P*L)*SI(diag(U))
  //  where det(P*L) = +- 1 (Number of permutation in P)
  //  and SI(diag(log|U|)) is the sum of the logarithm of the
  //  diagonal elements of U
  blitz::Array<double,2> Lperm(N,N);
  math::prod(P,L,Lperm);
  sign = 1;
  double Udiag=0.;
  for (int i=0; i<N; ++i)
  {
    for (int j=i+1; j<N; ++j)
      if (P(i,j) > 0)
      {
        sign = -sign;
        break;
      }
    Udiag += log(fabs(U(i,i)));
  }
  // Check for infinity
  if ((Udiag*-1) == std::numeric_limits<double>::infinity())
    sign = 0;

  return Udiag;
}
Code example #16
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
void bob::learn::em::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
  bob::core::array::assertSameDimensionLength(means.extent(0), m_n_gaussians);
  bob::core::array::assertSameDimensionLength(means.extent(1), m_n_inputs);
  for(size_t i=0; i<m_n_gaussians; ++i)
    m_gaussians[i]->updateMean() = means(i,blitz::Range::all());
  m_cache_supervector = false;
}
Code example #17
File: ncutil.hpp  Project: seifer08ms/icebin
template<class T, int rank>
void netcdf_write_blitz(NcVar *nc_var, blitz::Array<T, rank> const &val)
{
	long counts[rank];
	for (int i=0; i<rank; ++i) counts[i] = val.extent(i);
//printf("netcdf_write_blitz: %p %p\n", nc_var, val.data());
	nc_var->put(val.data(), counts);
}
Code example #18
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
void bob::learn::em::GMMMachine::setVarianceSupervector(const blitz::Array<double,1> &variance_supervector) {
  bob::core::array::assertSameDimensionLength(variance_supervector.extent(0), m_n_gaussians*m_n_inputs);
  for(size_t i=0; i<m_n_gaussians; ++i) {
    m_gaussians[i]->updateVariance() = variance_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
    m_gaussians[i]->applyVarianceThresholds();
  }
  m_cache_supervector = false;
}
Code example #19
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
void bob::learn::em::GMMMachine::accStatistics_(const blitz::Array<double,2>& input, bob::learn::em::GMMStats& stats) const {
  // iterate over data
  blitz::Range a = blitz::Range::all();
  for(int i=0; i<input.extent(0); ++i) {
    // Get example
    blitz::Array<double,1> x(input(i, a));
    // Accumulate statistics
    accStatistics_(x,stats);
  }
}
Code example #20
int32_t bob::learn::boosting::LUTTrainer::bestIndex(const blitz::Array<double,1>& array) const{
  double min = std::numeric_limits<double>::max();
  int32_t minIndex = -1;
  for (int i = 0; i < array.extent(0); ++i){
    if (array(i) < min){
      min = array(i);
      minIndex = i;
    }
  }
  return minIndex;
}
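
An equivalent formulation using std::min_element, assuming the array is contiguous and zero-based. Unlike bestIndex() above, this hypothetical variant returns 0 (not -1) for an empty input; like the original, it keeps the first occurrence on ties:

#include <algorithm>
#include <cstdint>
#include <blitz/array.h>

int32_t bestIndexStl(const blitz::Array<double,1>& array) {
  // data() is only meaningful for contiguous, zero-based storage
  const double* begin = array.data();
  const double* end = begin + array.extent(0);
  return static_cast<int32_t>(std::min_element(begin, end) - begin);
}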
Code example #21
File: TanTriggs.cpp  Project: 183amir/bob.ip.base
void bob::ip::base::TanTriggs::performContrastEqualization(blitz::Array<double,2>& dst)
{
  const double inv_alpha = 1./m_alpha;
  const double wxh = dst.extent(0)*dst.extent(1);

  // first step: I:=I/mean(abs(I)^a)^(1/a)
  blitz::Range dst_y( dst.lbound(0), dst.ubound(0)),
               dst_x( dst.lbound(1), dst.ubound(1));
  double norm_fact =
    pow( sum( pow( fabs(dst(dst_y,dst_x)), m_alpha)) / wxh, inv_alpha);
  dst(dst_y,dst_x) /= norm_fact;

  // Second step: I:=I/mean(min(threshold,abs(I))^a)^(1/a)
  const double threshold_alpha = pow( m_threshold, m_alpha );
  norm_fact =  pow( sum( min( threshold_alpha,
    pow( fabs(dst(dst_y,dst_x)), m_alpha))) / wxh, inv_alpha);
  dst(dst_y,dst_x) /= norm_fact;

  // Last step: I:= threshold * tanh( I / threshold )
  dst(dst_y,dst_x) = m_threshold * tanh( dst(dst_y,dst_x) / m_threshold );
}
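
For reference, a hypothetical restatement of the same three stages on a flat buffer, with alpha and threshold as plain parameters (illustrative only, not the bob.ip.base API):

#include <algorithm>
#include <cmath>
#include <vector>

void contrast_equalize(std::vector<double>& I, double alpha, double threshold) {
  const double inv_alpha = 1.0 / alpha;
  const double n = static_cast<double>(I.size());

  // first step: I := I / mean(|I|^a)^(1/a)
  double acc = 0.0;
  for (double v : I) acc += std::pow(std::fabs(v), alpha);
  double norm = std::pow(acc / n, inv_alpha);
  for (double& v : I) v /= norm;

  // second step: I := I / mean(min(threshold^a, |I|^a))^(1/a)
  const double t_alpha = std::pow(threshold, alpha);
  acc = 0.0;
  for (double v : I) acc += std::min(t_alpha, std::pow(std::fabs(v), alpha));
  norm = std::pow(acc / n, inv_alpha);
  for (double& v : I) v /= norm;

  // last step: I := threshold * tanh(I / threshold)
  for (double& v : I) v = threshold * std::tanh(v / threshold);
}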
Code example #22
File: ncutil.hpp  Project: seifer08ms/icebin
template<class T, int rank>
boost::function<void ()> netcdf_define(
	NcFile &nc,
	std::string const &vname,
	blitz::Array<T,rank> const &val,
	std::vector<NcDim *> const &ddims = {})
{
	// Type-check for unit strides
	int stride = 1;
	for (int i=rank-1; i>=0; --i) {
		if (val.stride(i) != stride) {
			fprintf(stderr, "Unexpected stride of %d (should be %d) in dimension %d (extent=%d) of %s (rank=%d)\n", val.stride(i), stride, i, val.extent(i), vname.c_str(), rank);
			fprintf(stderr, "Are you trying to write a Fortran-style array?  Use f_to_c() in blitz.hpp first\n");
			throw std::exception();
		}
//printf("(stride=%d) *= (val.extent[%d]=%d)\n", stride, i, val.extent(i));
		stride *= val.extent(i);
	}

	// Create the required dimensions
	NcDim const *dims[rank];
	for (int i=0; i<rank; ++i) {
		if (i >= ddims.size()) {
			char dim_name[200];
			sprintf(dim_name, "%s.dim%d", vname.c_str(), i);
			dims[i] = nc.add_dim(dim_name, val.extent(i));
		} else {
			dims[i] = ddims[i];
		}
		assert(dims[i] != NULL);
	}

	// Create the variable
	NcVar *nc_var = nc.add_var(vname.c_str(), get_nc_type<T>(), rank, dims);
	assert(nc_var != NULL);

	// Write it out (later)
	return boost::bind(&netcdf_write_blitz<T,rank>, nc_var, val);
}
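
A usage sketch of the define-then-write pattern above, assuming the legacy NetCDF C++ interface (netcdfcpp.h) that this header targets:

#include <netcdfcpp.h>
#include <boost/function.hpp>
#include <blitz/array.h>
// assumes netcdf_define() and netcdf_write_blitz() from ncutil.hpp above

void write_example(NcFile &nc)
{
	blitz::Array<double,2> val(3, 4);
	val = 0.0;
	// dimensions and the variable are created now...
	boost::function<void ()> writeme = netcdf_define(nc, "val", val);
	// ...more variables may be defined here...
	writeme();	// ...and the data is written once definitions are done
}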
Code example #23
File: blitz_op.hpp  Project: Swagataacharya/TRIQS
template<class VAL>
void matmul_A_M_B( const blitz::Array<VAL,2> & A, blitz::Array<VAL,2> & M, const blitz::Array<VAL,2> & B ) {
  assert (A.extent(0) == A.extent(1));   
  assert (B.extent(0) == B.extent(1));   
  assert (A.extent(1) == M.extent(0));   
  assert (B.extent(0) == M.extent(1));
  blitz::Array<VAL,2> tmp(M.extent(0),B.extent(1), blitz::fortranArray);
  matmul_lapack(M, B ,tmp);
  matmul_lapack( A, tmp, M);
}
Code example #24
File: writer.cpp  Project: 183amir/bob.io.audio
void bob::io::audio::Writer::append(const blitz::Array<double,1>& data) {

  if (!m_opened) {
    boost::format m("audio writer for file `%s' is closed and cannot be written to");
    m % m_filename;
    throw std::runtime_error(m.str());
  }

  if (!m_typeinfo.shape[0]) /* set for the first time */ {
    m_file->signal.channels = data.extent(0);
    m_typeinfo.shape[0] = data.extent(0);
    m_typeinfo.update_strides();
  }

  //checks data specifications
  if (m_typeinfo.shape[0] != (size_t)data.extent(0)) {
    boost::format m("input sample size for file `%s' should be (%d,)");
    m % m_filename % m_typeinfo.shape[0];
    throw std::runtime_error(m.str());
  }

  for (int j=0; j<data.extent(0); ++j)
    m_buffer[j] = (sox_sample_t)(data(j) * bob::io::audio::SOX_CONVERSION_COEF);
  size_t written = sox_write(m_file.get(), m_buffer.get(), m_typeinfo.shape[0]);

  // updates internal counters
  m_file->signal.length += m_file->signal.channels;
  m_typeinfo.shape[1] += 1;
  m_typeinfo.update_strides();

  if (written != m_typeinfo.shape[0]) {
    boost::format m("I was asked to append 1 sample to file `%s', but `sox_write()' failed miserably - this is not a definitive error, the stream is still sane");
    m % m_filename;
    throw std::runtime_error(m.str());
  }
}
Code example #25
void bob::math::eigSym(const blitz::Array<double,2>& A, const blitz::Array<double,2>& B,
  blitz::Array<double,2>& V, blitz::Array<double,1>& D)
{
  // Size variable
  const int N = A.extent(0);
  const blitz::TinyVector<int,1> shape1(N);
  const blitz::TinyVector<int,2> shape2(N,N);
  bob::core::array::assertZeroBase(A);
  bob::core::array::assertZeroBase(B);
  bob::core::array::assertZeroBase(V);
  bob::core::array::assertZeroBase(D);

  bob::core::array::assertSameShape(A,shape2);
  bob::core::array::assertSameShape(B,shape2);
  bob::core::array::assertSameShape(V,shape2);
  bob::core::array::assertSameShape(D,shape1);

  bob::math::eigSym_(A, B, V, D);
}
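
A minimal sketch calling eigSym() on a 2x2 pencil; only the declaration above is assumed:

#include <blitz/array.h>

namespace bob { namespace math {
  void eigSym(const blitz::Array<double,2>& A, const blitz::Array<double,2>& B,
    blitz::Array<double,2>& V, blitz::Array<double,1>& D);
}}

int main() {
  // solve A v = lambda B v with symmetric A and positive-definite B
  blitz::Array<double,2> A(2,2), B(2,2), V(2,2);
  blitz::Array<double,1> D(2);
  A = 2., 1.,
      1., 2.;
  B = 1., 0.,
      0., 1.;
  bob::math::eigSym(A, B, V, D);  // eigenvalues land in D, eigenvectors in V
  return 0;
}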
Code example #26
boost::shared_ptr<bob::learn::boosting::LUTMachine> bob::learn::boosting::LUTTrainer::train(const blitz::Array<uint16_t,2>& trainingFeatures, const blitz::Array<double,2>& lossGradient) const{
  int featureLength = trainingFeatures.extent(1);
  _lossSum.resize(featureLength, m_numberOfOutputs);
  // Compute the sum of the gradient based on the feature values or the loss associated with each feature index
  // Compute the loss for each feature
  for (int featureIndex = featureLength; featureIndex--;){
    for (int outputIndex = m_numberOfOutputs; outputIndex--;){
      weightedHistogram(trainingFeatures(blitz::Range::all(),featureIndex), lossGradient(blitz::Range::all(), outputIndex));
      _lossSum(featureIndex,outputIndex) = - blitz::sum(blitz::abs(_gradientHistogram));
    }
  }

  // Select the most discriminative index (or indices) for classification which minimizes the loss
  //  and compute the sum of gradient for that index
  if (m_selectionType == independent){
    // independent feature selection is used if all the dimension of output use different feature
    // each of the selected feature minimize a dimension of the loss function
    for (int outputIndex = m_numberOfOutputs; outputIndex--;){
      _selectedIndices(outputIndex) = bestIndex(_lossSum(blitz::Range::all(),outputIndex));
    }
  } else {
    // for 'shared' feature selection the loss function is summed over multiple dimensions and
    // the feature that minimized this cumulative loss is used for all the outputs
    blitz::secondIndex j;
    const blitz::Array<double,1> sum(blitz::sum(_lossSum, j));
    _selectedIndices = bestIndex(sum);
  }

  // compute the look-up-tables for the best index
  for (int outputIndex = m_numberOfOutputs; outputIndex--;){
    int selectedIndex = _selectedIndices(outputIndex);
    weightedHistogram(trainingFeatures(blitz::Range::all(), selectedIndex), lossGradient(blitz::Range::all(), outputIndex));

    for (int lutIndex = m_maximumFeatureValue; lutIndex--;){
      _luts(lutIndex, outputIndex) = (_gradientHistogram(lutIndex) > 0) * 2. - 1.;
    }
  }

  // create new weak machine
  return boost::shared_ptr<LUTMachine>(new LUTMachine(_luts.copy(), _selectedIndices.copy()));
}
Code example #27
File: Ceps.cpp  Project: bioidiap/bob.ap
void bob::ap::Ceps::addDerivative(const blitz::Array<double,2>& input, blitz::Array<double,2>& output) const
{
  // Initialize output to zero
  output = 0.;

  const int n_frames = input.extent(0);
  blitz::Range rall = blitz::Range::all();

  // Fill in the inner part as follows:
  // \f$output[i] += \sum_{l=1}^{DW} l * (input[i+l] - input[i-l])\f$
  for (int l=1; l<=(int)m_delta_win; ++l) {
    blitz::Range rout(l,n_frames-l-1);
    blitz::Range rp(2*l,n_frames-1);
    blitz::Range rn(0,n_frames-2*l-1);
    output(rout,rall) += l*(input(rp,rall) - input(rn,rall));
  }

  const double factor = m_delta_win*(m_delta_win+1)/2;
  // Continue to fill the left boundary part as follows:
  // \f$output[i] += (\sum_{l=1+i}^{DW} l*input[i+l]) - (\sum_{l=i+1}^{DW}l)*input[0])\f$
  for (int i=0; i<(int)m_delta_win; ++i) {
    output(i,rall) -= (factor - i*(i+1)/2) * input(0,rall);
    for (int l=1+i; l<=(int)m_delta_win; ++l) {
      output(i,rall) += l*(input(i+l,rall));
    }
  }
  // Continue to fill the right boundary part as follows:
  // \f$output[i] += (\sum_{l=Nframes-1-i}^{DW}l)*input[Nframes-1]) - (\sum_{l=Nframes-1-i}^{DW} l*input[i-l])\f$
  for (int i=n_frames-(int)m_delta_win; i<n_frames; ++i) {
    int ii = (n_frames-1)-i;
    output(i,rall) += (factor - ii*(ii+1)/2) * input(n_frames-1,rall);
    for (int l=1+ii; l<=(int)m_delta_win; ++l) {
      output(i,rall) -= l*input(i-l,rall);
    }
  }
  // Sum of the integer squared from 1 to delta_win
  // pavel - remove division for the sake of compitability with Matlab code of RFFC features comparison paper
  //const double sum = m_delta_win*(m_delta_win+1)*(2*m_delta_win+1)/3;
  //output /= sum;
}
Code example #28
File: GMMMachine.cpp  Project: 183amir/bob.learn.em
void bob::learn::em::GMMMachine::setMeanSupervector(const blitz::Array<double,1> &mean_supervector) {
  bob::core::array::assertSameDimensionLength(mean_supervector.extent(0), m_n_gaussians*m_n_inputs);
  for(size_t i=0; i<m_n_gaussians; ++i)
    m_gaussians[i]->updateMean() = mean_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
  m_cache_supervector = false;
}
Code example #29
double bob::math::slogdet(const blitz::Array<double,2>& A, int& sign)
{
  bob::core::array::assertSameDimensionLength(A.extent(0),A.extent(1));
  return bob::math::slogdet_(A, sign);
}
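
A usage sketch showing how the determinant is recovered from the (sign, log|det|) pair; only the declaration above is assumed:

#include <blitz/array.h>
#include <cmath>

namespace bob { namespace math {
  double slogdet(const blitz::Array<double,2>& A, int& sign);
}}

int main() {
  blitz::Array<double,2> A(2,2);
  A = 4., 0.,
      0., 0.25;
  int sign = 0;
  double logdet = bob::math::slogdet(A, sign);
  double det = sign * std::exp(logdet);  // here det == 1
  (void)det;
  return 0;
}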
Code example #30
void bob::math::eigSym_(const blitz::Array<double,2>& A, const blitz::Array<double,2>& B,
  blitz::Array<double,2>& V, blitz::Array<double,1>& D)
{
  // Size variable
  const int N = A.extent(0);

  // Prepares to call LAPACK function
  // Initialises LAPACK variables
  const int itype = 1;
  const char jobz = 'V'; // Get both the eigenvalues and the eigenvectors
  const char uplo = 'U';
  int info = 0;
  const int lda = N;
  const int ldb = N;

  // Initialises LAPACK arrays
  blitz::Array<double,2> A_blitz_lapack;
  // Tries to use V directly
  blitz::Array<double,2> Vt = V.transpose(1,0);
  const bool V_direct_use = bob::core::array::isCZeroBaseContiguous(Vt);
  if (V_direct_use)
  {
    A_blitz_lapack.reference(Vt);
    // Ugly fix for non-const transpose
    A_blitz_lapack = const_cast<blitz::Array<double,2>&>(A).transpose(1,0);
  }
  else
    // Ugly fix for non-const transpose
    A_blitz_lapack.reference(
      bob::core::array::ccopy(const_cast<blitz::Array<double,2>&>(A).transpose(1,0)));
  double *A_lapack = A_blitz_lapack.data();
  // Ugly fix for non-const transpose
  blitz::Array<double,2> B_blitz_lapack(
    bob::core::array::ccopy(const_cast<blitz::Array<double,2>&>(B).transpose(1,0)));
  double *B_lapack = B_blitz_lapack.data();
  blitz::Array<double,1> D_blitz_lapack;
  const bool D_direct_use = bob::core::array::isCZeroBaseContiguous(D);
  if (D_direct_use)
    D_blitz_lapack.reference(D);
  else
    D_blitz_lapack.resize(D.shape());
  double *D_lapack = D_blitz_lapack.data();

  // Calls the LAPACK function
  // A/ Queries the optimal size of the working arrays
  const int lwork_query = -1;
  double work_query;
  const int liwork_query = -1;
  int iwork_query;
  dsygvd_( &itype, &jobz, &uplo, &N, A_lapack, &lda, B_lapack, &ldb, D_lapack,
    &work_query, &lwork_query, &iwork_query, &liwork_query, &info);
  // B/ Computes the generalized eigenvalue decomposition
  const int lwork = static_cast<int>(work_query);
  boost::shared_array<double> work(new double[lwork]);
  const int liwork = static_cast<int>(iwork_query);
  boost::shared_array<int> iwork(new int[liwork]);
  dsygvd_( &itype, &jobz, &uplo, &N, A_lapack, &lda, B_lapack, &ldb, D_lapack,
    work.get(), &lwork, iwork.get(), &liwork, &info);

  // Checks info variable
  if (info != 0)
    throw std::runtime_error("The LAPACK function 'dsygvd' returned a non-zero value. This might be caused by a non-positive definite B matrix.");

  // Copy singular vectors back to V if required
  if (!V_direct_use)
    V = A_blitz_lapack.transpose(1,0);

  // Copy result back to sigma if required
  if (!D_direct_use)
    D = D_blitz_lapack;
}