Example #1
/**
 * Integrate an SDDE one step forward for a given vector field
 * and state using the Euler-Maruyama scheme.
 * \param[in]     delayedField Delayed vector field to evaluate.
 * \param[in]     stocField    Stochastic vector field to evaluate.
 * \param[in,out] currentState Current state to update by one time step.
 */
void
EulerMaruyamaSDDE::stepForward(vectorFieldDelay *delayedField,
			       vectorFieldStochastic *stocField,
			       gsl_matrix *currentState)
{
  // Assign pointers to workspace vectors
  gsl_vector_view tmp = gsl_matrix_row(work, 0);
  gsl_vector_view tmp1 = gsl_matrix_row(work, 1);
  gsl_vector_view presentState;

  /** Evaluate drift */
  delayedField->evalField(currentState, &tmp.vector);
  // Scale by time step
  gsl_vector_scale(&tmp.vector, dt);

  /** Update the history of past states */
  updateHistoric(currentState);

  // Assign pointer to present state
  presentState = gsl_matrix_row(currentState, 0);

  // Evaluate stochastic field at present state
  stocField->evalField(&presentState.vector, &tmp1.vector); 
  // Scale by time step
  gsl_vector_scale(&tmp1.vector, sqrt(dt));

  // Add drift to present state
  gsl_vector_add(&presentState.vector, &tmp.vector);

  /** Add diffusion at present state */
  gsl_vector_add(&presentState.vector, &tmp1.vector);

  return;
}
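For orientation, a minimal stand-alone sketch of the same Euler-Maruyama update for a plain (non-delayed) SDE with diagonal noise. The field_fn typedef, the explicit per-component Gaussian draws, and the temporaries allocated inside the step are illustrative assumptions; they are not part of the class above, which evaluates its noise through stocField and reuses a preallocated work matrix instead.

#include <math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>

typedef void (*field_fn)(const gsl_vector *x, gsl_vector *out);

/* One Euler-Maruyama step: x <- x + f(x)*dt + g(x)*sqrt(dt)*xi,
 * with xi an independent standard Gaussian draw per component. */
void euler_maruyama_step(field_fn drift, field_fn diffusion,
                         gsl_vector *x, double dt, const gsl_rng *r)
{
  gsl_vector *fx = gsl_vector_alloc(x->size);
  gsl_vector *gx = gsl_vector_alloc(x->size);

  drift(x, fx);       /* deterministic drift f(x) */
  diffusion(x, gx);   /* diffusion amplitude g(x), evaluated at the same state */

  gsl_vector_scale(fx, dt);               /* f(x) * dt */
  for (size_t i = 0; i < x->size; i++)    /* g(x) * sqrt(dt) * xi, componentwise */
    gsl_vector_set(gx, i,
                   gsl_vector_get(gx, i) * sqrt(dt) * gsl_ran_ugaussian(r));

  gsl_vector_add(x, fx);
  gsl_vector_add(x, gx);

  gsl_vector_free(fx);
  gsl_vector_free(gx);
}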
Example #2
/* Update parameters using an implicit solver for
 * equation (17) of Girolami and Calderhead (2011).
 * Arguments:
 *	state:		a pointer to internal working storage for RMHMC.
 *  model:		a pointer to the rmhmc_model structure with pointers to user defined functions.
 *	N:			number of parameters.
 *	stepSize:	integration step-size.
 *  Result:
 *	 The method directly updates the new_x array in the state structure.
 *	 returns 0 for success or non-zero for failure.
 */
static int parametersNewtonUpdate(rmhmc_params* state, rmhmc_model* model, int N , double stepSize){
	
	gsl_vector_view new_x_v = gsl_vector_view_array(state->new_x, N);
	gsl_vector_view new_p_v = gsl_vector_view_array(state->new_momentum, N);
	gsl_matrix_view new_cholM_v = gsl_matrix_view_array(state->new_cholMx, N, N);
	
	/* temp copy of parameters */
	gsl_vector_view x0_v = gsl_vector_view_array(state->btmp, N);
	gsl_vector_memcpy(&x0_v.vector, &new_x_v.vector);
	
	/* temp copy of inverse Metric */
	gsl_matrix_view new_invM_v = gsl_matrix_view_array(state->new_invMx, N, N);
	gsl_matrix_view invM0_v = gsl_matrix_view_array(state->tmpM, N, N);
	gsl_matrix_memcpy(&invM0_v.matrix, &new_invM_v.matrix);
	
	gsl_vector_view a_v = gsl_vector_view_array(state->atmp, N);

	/* a = invM0*pNew */
	/* TODO: replace gsl_blas_dgemv with gsl_blas_dsymv since invM0_v.matrix is symmetric */
	gsl_blas_dgemv(CblasNoTrans, 1.0, &invM0_v.matrix, &new_p_v.vector, 0.0, &a_v.vector);
	
	int iterations = state->fIt;
	int flag = 0;
	int i;
	for (i = 0; i < iterations; i++) {
		/* new_x = invM_new*p_new */
		/* TODO: replace gsl_blas_dgemv with gsl_blas_dsymv since new_invM_v.matrix is symmetric */
		gsl_blas_dgemv(CblasNoTrans, 1.0, &new_invM_v.matrix, &new_p_v.vector, 0.0, &new_x_v.vector);
		
		/* Calculates new_x_v = x0 + 0.5*stepSize*(invM_0*newP + newInvM*newP) */
		gsl_vector_add(&new_x_v.vector, &a_v.vector);
		gsl_vector_scale(&new_x_v.vector, 0.5*stepSize);
		gsl_vector_add(&new_x_v.vector, &x0_v.vector);
		
		/* calculate metric at the current position or update everything if this is the last iteration */
		if ( (i == iterations-1) )
			/* call user defined function for updating all quantities */
			model->PosteriorAll(state->new_x, model->m_params, &state->new_fx, state->new_dfx, state->new_cholMx, state->new_dMx);
		else
			/* call user defined function for updating only the metric tensor */
			model->Metric(state->new_x, model->m_params, state->new_cholMx);
		
		/* calculate cholesky factor for current metric */
		gsl_error_handler_t* old_handler = gsl_set_error_handler_off();
		flag = gsl_linalg_cholesky_decomp( &new_cholM_v.matrix );
		gsl_set_error_handler(old_handler);
		if (flag != 0){
			fprintf(stderr,"RMHMC: matrix not positive definite in parametersNewtonUpdate.\n");
			return flag;
		}
		
		/* calculate inverse for current metric */
		gsl_matrix_memcpy(&new_invM_v.matrix, &new_cholM_v.matrix );
		gsl_linalg_cholesky_invert(&new_invM_v.matrix);
	}
	return flag;
	
}
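For reference, the fixed point iterated above (restating the in-code comment inside the loop, with $G$ the metric whose inverse is stored in new_invMx and $\varepsilon$ the step size) is

$$x_{\text{new}} = x_0 + \tfrac{\varepsilon}{2}\,\bigl[\,G^{-1}(x_0) + G^{-1}(x_{\text{new}})\,\bigr]\,p_{\text{new}},$$

solved here by fixed-point iteration over state->fIt steps.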
Example #3
int rmvnorm(const gsl_rng *r, const int n, const gsl_vector *mean, 
		const gsl_matrix *var, gsl_vector *result){
    /* multivariate normal distribution random number generator */
    /*
     *	n	dimension of the random vector
     *	mean	vector of means of size n
     *	var	variance matrix of dimension n x n
     *	result	output vector holding a single multivariate normal draw
     */
    int k;
    gsl_matrix *work = gsl_matrix_alloc(n,n);

    gsl_matrix_memcpy(work,var);
    gsl_linalg_cholesky_decomp(work);

    for(k=0; k<n; k++)
	gsl_vector_set( result, k, gsl_ran_ugaussian(r) );

    gsl_blas_dtrmv(CblasLower, CblasNoTrans, CblasNonUnit, work, result );
    gsl_vector_add(result,mean);
    gsl_matrix_free(work);
    return 0;
}
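A hypothetical calling sketch for rmvnorm() above; the dimension and the identity covariance are placeholder choices, and gsl_rng_env_setup()/gsl_rng_default are used only to obtain a generator.

#include <gsl/gsl_rng.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>

void draw_one_sample(void)
{
    const int n = 3;
    gsl_rng_env_setup();
    gsl_rng *r = gsl_rng_alloc(gsl_rng_default);

    gsl_vector *mean   = gsl_vector_calloc(n);   /* zero mean */
    gsl_matrix *var    = gsl_matrix_alloc(n, n);
    gsl_vector *result = gsl_vector_alloc(n);
    gsl_matrix_set_identity(var);                /* unit variances, no correlation */

    rmvnorm(r, n, mean, var, result);            /* result now holds one N(mean, var) draw */

    gsl_vector_free(mean);
    gsl_vector_free(result);
    gsl_matrix_free(var);
    gsl_rng_free(r);
}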
Example #4
/* Generate a random vector from a multivariate Gaussian distribution using
 * the Cholesky decomposition of the variance-covariance matrix, following
 * "Computational Statistics" from Gentle (2009), section 7.4.
 *
 * mu      mean vector (dimension d)
 * L       matrix resulting from the Cholesky decomposition of
 *         variance-covariance matrix Sigma = L L^T (dimension d x d)
 * result  output vector (dimension d)
 */
int
gsl_ran_multivariate_gaussian (const gsl_rng * r,
                               const gsl_vector * mu,
                               const gsl_matrix * L,
                               gsl_vector * result)
{
  const size_t M = L->size1;
  const size_t N = L->size2;

  if (M != N)
    {
      GSL_ERROR("requires square matrix", GSL_ENOTSQR);
    }
  else if (mu->size != M)
    {
      GSL_ERROR("incompatible dimension of mean vector with variance-covariance matrix", GSL_EBADLEN);
    }
  else if (result->size != M)
    {
      GSL_ERROR("incompatible dimension of result vector", GSL_EBADLEN);
    }
  else
    {
      size_t i;

      for (i = 0; i < M; ++i)
        gsl_vector_set(result, i, gsl_ran_ugaussian(r));

      gsl_blas_dtrmv(CblasLower, CblasNoTrans, CblasNonUnit, L, result);
      gsl_vector_add(result, mu);

      return GSL_SUCCESS;
    }
}
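The routine expects the Cholesky factor L rather than Sigma itself, relying on the fact that mu + L z ~ N(mu, L L^T) when z ~ N(0, I). A minimal wrapper sketch (the name draw_mvn_sample is hypothetical) that factors a copy of Sigma before calling it:

#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_randist.h>

int
draw_mvn_sample (const gsl_rng * r, const gsl_vector * mu,
                 const gsl_matrix * Sigma, gsl_vector * result)
{
  gsl_matrix *L = gsl_matrix_alloc(Sigma->size1, Sigma->size2);
  gsl_matrix_memcpy(L, Sigma);

  /* lower triangle of L now holds the Cholesky factor; the upper triangle
   * left behind is ignored by the CblasLower dtrmv inside the sampler */
  gsl_linalg_cholesky_decomp(L);

  int status = gsl_ran_multivariate_gaussian(r, mu, L, result);
  gsl_matrix_free(L);
  return status;
}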
Example #5
/// Add a vector
/// @param v :: The other vector
GSLVector &GSLVector::operator+=(const GSLVector &v) {
  if (size() != v.size()) {
    throw std::runtime_error("GSLVectors have different sizes.");
  }
  gsl_vector_add(gsl(), v.gsl());
  return *this;
}
Example #6
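/*
 * Fill each column of A with an independent draw from N(mu, Sigma).
 * Either a precomputed lower Cholesky factor L of Sigma or Sigma itself
 * must be supplied; if L is NULL, Sigma is factorized here.
 */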
void
gslu_rand_gaussian_matrix (const gsl_rng *r, gsl_matrix *A, 
                           const gsl_vector *mu, const gsl_matrix *Sigma, const gsl_matrix *L)
{
    assert (A->size1 == mu->size && (Sigma || L));
    for (size_t i=0; i<A->size1; i++)
        for (size_t j=0; j<A->size2; j++)
            gsl_matrix_set (A, i, j, gsl_ran_gaussian_ziggurat (r, 1.0));
    
    if (L) {
        assert (L->size1 == L->size2 && L->size1 == mu->size);
        gsl_blas_dtrmm (CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, 1.0, L, A);
    }
    else {
        assert (Sigma->size1 == Sigma->size2 && Sigma->size1 == mu->size);
        gsl_matrix *_L = gsl_matrix_alloc (Sigma->size1, Sigma->size2);
        gsl_matrix_memcpy (_L, Sigma);
        gsl_linalg_cholesky_decomp (_L);
        gsl_blas_dtrmm (CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, 1.0, _L, A);
        gsl_matrix_free (_L);
    }

    for (size_t j=0; j<A->size2; j++) {
        gsl_vector_view a = gsl_matrix_column (A, j);
        gsl_vector_add (&a.vector, mu);
    }
}
Example #7
/** 
 * Evaluate the delayed vector field from fields for each delay.
 * \param[in]  state State at which to evaluate the vector field.
 * \param[out] field Vector resulting from the evaluation of the vector field.
 */
void
vectorFieldDelay::evalField(gsl_matrix *state, gsl_vector *field)
{
  gsl_vector_view delayedState;
  unsigned int delay;

  // Set field evaluation to 0
  gsl_vector_set_zero(field);

  /** Add delayed drifts */
  for (size_t d = 0; d < nDelays; d++)
    {
      delay = gsl_vector_uint_get(delays, nDelays - d - 1);
      
      // Assign pointer to delayed state
      delayedState = gsl_matrix_row(state, delay);
      
      // Evaluate vector field at delayed state
      fields->at(nDelays - d - 1)->evalField(&delayedState.vector, work);
      
      // Accumulate into the total field
      gsl_vector_add(field, work);
    }

  return;
}
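Put differently, the loop above simply accumulates the per-delay contributions,

$$F(x) = \sum_{d} f_d\bigl(x(t - \tau_d)\bigr),$$

where each $f_d$ is the field stored in fields and $x(t - \tau_d)$ is the row of state selected by the corresponding entry of delays.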
Example #8
File: em_weight.c Project: b-k/tea
void merge_in_weights_so_far(apop_data *new_bit, apop_data *main_set){
    if (!new_bit) return;
    if (new_bit->weights->size != main_set->weights->size)
        merge_two_sets(new_bit, main_set);   //shrinks main_set
    else //candidate, prior_candidate, and cp differ only by weights
        gsl_vector_add(new_bit->weights, main_set->weights);
}
Example #9
void gsl_vector_step_random(const gsl_rng* r, gsl_vector* v,
                            const double step_size)
{
  const size_t n = v->size;
  gsl_vector* vp = gsl_vector_alloc(n);

  // Fill vp with normally distributed random numbers and
  // compute the Euclidean norm of this vector.
  double length = 0.;
  for (size_t i = 0; i < n; ++i)
  {
    double* vp_i = gsl_vector_ptr(vp, i);
    *vp_i = gsl_ran_ugaussian(r);
    length += pow(*vp_i, 2);
  }
  length = sqrt(length);

  // Scale vp so that the elements of vp are uniformly distributed
  // within an n-sphere of radius step_size.
  const double scale = pow(pow(step_size, boost::numeric_cast<int>(n))
    * gsl_rng_uniform_pos(r), 1.0/n) / length;
  gsl_vector_scale(vp, scale);

  gsl_vector_add(v, vp);
  gsl_vector_free(vp);
}
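One way to read the scale factor: for a point uniform in an n-ball of radius $R$ (here step_size), the radius has CDF $P(\rho \le r) = (r/R)^n$, so inverse-transform sampling gives $\rho = R\,U^{1/n}$ with $U \sim \mathrm{Uniform}(0,1)$. This is exactly pow(pow(step_size, n) * gsl_rng_uniform_pos(r), 1.0/n); dividing by length then turns vp into a unit direction scaled to that radius.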
Example #10
void update_unit(unit *u, camera *cam, PLAYERS *players) {
  if(is_unit_dead(u)) {
    if(u->state != NULL)
      unit_dead(u);
    return;
  }

  // set the velocity to 0
  gsl_vector_scale(u->velocity, 0);

  if(!((state *) u->state->value)->update(players, cam, u)) {
    pop_unit_state(u);
  }

  double norm = gsl_blas_dnrm2(u->velocity);
  if(norm > 0) {
    gsl_vector_scale(u->velocity, 1 / norm);

    gsl_vector_memcpy(u->heading, u->velocity);

    gsl_vector_scale(u->velocity, u->attributes.speed);
    gsl_vector_add(u->position, u->velocity);

    gsl_vector_set(u->side, 0, -y(u->heading));
    gsl_vector_set(u->side, 1, x(u->heading));
  }
}
Example #11
File: utils.c Project: hwp/notGHMM
void gaussian_gen(const gsl_rng* rng, const gaussian_t* dist,
    gsl_vector* result) {
  assert(result->size == dist->dim);

  size_t i;
  for (i = 0; i < result->size; i++) {
    gsl_vector_set(result, i, gsl_ran_ugaussian(rng));
  }

  if (gaussian_isdiagonal(dist)) {
    for (i = 0; i < result->size; i++) {
      double* p = gsl_vector_ptr(result, i);
      *p *= DEBUG_SQRT(gsl_vector_get(dist->diag, i));
    }
  }
  else {
    gsl_matrix* v = gsl_matrix_alloc(dist->dim, dist->dim);
    gsl_matrix_memcpy(v, dist->cov);

    gsl_linalg_cholesky_decomp(v);
    gsl_blas_dtrmv(CblasLower, CblasNoTrans, CblasNonUnit, v, result);

    gsl_matrix_free(v);
  }

  gsl_vector_add(result, dist->mean);
}
Example #12
GslVector&
GslVector::operator+=(const GslVector& rhs)
{
  int iRC;
  iRC = gsl_vector_add(m_vec,rhs.m_vec);
  queso_require_msg(!(iRC), "failed");
  return *this;
}
Example #13
gsl_vector* determineMean(gsl_vector* object, int length, gsl_vector* mean)
{
  gsl_vector_add(mean, object);

  return 0;
}
Example #14
void mcmclib_gauss_mrw_sample(mcmclib_mh_q* q, gsl_vector* x) {
  gsl_matrix* Sigma = (gsl_matrix*) q->gamma;
  gsl_vector* x_old = gsl_vector_alloc(x->size);
  gsl_vector_memcpy(x_old, x);
  mcmclib_mvnorm(q->r, Sigma, x);
  gsl_vector_add(x, x_old);
  gsl_vector_free(x_old);
}
Example #15
/** **************************************************************************************************************/ 
int rv_g_inner_gaus (const gsl_vector *epsilonvec, void *params, double *gvalue)
{  
  
  double epsilon=gsl_vector_get(epsilonvec,0);
   const gsl_vector *Y = ((struct fnparams *) params)->Y;/** response variable **/
   const gsl_matrix *X = ((struct fnparams *) params)->X;/** design matrix INC epsilon col **/    
   const gsl_vector *beta = ((struct fnparams *) params)->beta;/** fixed covariate and precision terms **/
   gsl_vector *vectmp1 = ((struct fnparams *) params)->vectmp1;
   gsl_vector *vectmp1long = ((struct fnparams *) params)->vectmp1long;
   gsl_vector *vectmp2long = ((struct fnparams *) params)->vectmp2long;
   
   double tau_rv = gsl_vector_get(beta,beta->size-2);/** inc the precision terms - second last entries */
   double tau_resid = gsl_vector_get(beta,beta->size-1);/** last entry - residual precision */
   double n = (double)(Y->size);/** number of observations */
   int i;
   
   double term1,term2;
   
   
   /** easy terms collected together - no Y,X, or betas **/
   term1 = (n/2.0)*log(tau_resid/(2.0*M_PI)) - (tau_rv/2.0)*epsilon*epsilon + 0.5*log(tau_rv/(2.0*M_PI));
   
   
   /** now for the more complex term */
   /** the design matrix does not include precisions but does include epsilon, beta includes precisions but not epsilon. To use matrix operations
       we make a copy of beta and replace one precision value with value for epsilon - copy into vectmp1 */
   for(i=0;i<beta->size-2;i++){gsl_vector_set(vectmp1,i,gsl_vector_get(beta,i));} /** copy **/ 
   gsl_vector_set(vectmp1,beta->size-2,epsilon); /** last entry in vectmp1 is not precision but epsilon **/
   
   /*for(i=0;i<vectmp1->size;i++){Rprintf("=>%f\n",gsl_vector_get(vectmp1,i));} */
     
   /** get X%*%beta where beta = (b0,b1,...,epsilon) and so we get a vector of b0*1+b1*x1i+b2*x2i+epsilon*1 for each obs i */
    gsl_blas_dgemv (CblasNoTrans, 1.0, X, vectmp1, 0.0, vectmp1long);/** vectmp1long hold X%*%vectmp1 = X%*%mybeta **/  
    /*for(i=0;i<vectmp1long->size;i++){Rprintf("=%f\n",gsl_vector_get(vectmp1long,i));}*/
    
    /*Rprintf("---\n");for(i=0;i<X->size1;i++){for(j=0;j<X->size2;j++){Rprintf("%f ",gsl_matrix_get(X,i,j));}Rprintf("\n");}Rprintf("---\n");*/

   /*for(i=0;i<vectmp2long->size;i++){Rprintf(">%f\n",gsl_vector_get(vectmp2long,i));}*/
   
   gsl_vector_scale(vectmp1long,-1.0);/** multiply each entry by -1 **/
   gsl_vector_memcpy(vectmp2long,Y);/** vectmp2long becomes Y **/
   gsl_vector_add(vectmp2long,vectmp1long);/** vectmp2long becomes Y-XB **/
   
   /*for(i=0;i<vectmp2long->size;i++){Rprintf("> %f\n",gsl_vector_get(vectmp2long,i));}*/
   
   /** need sum of (Y-XB)^2 so just do a dot product **/
   gsl_vector_memcpy(vectmp1long,vectmp2long);/** copy vectmp2long into vectmp1long */
   gsl_blas_ddot (vectmp2long, vectmp1long, &term2);/** just to get the sum of (Y-XB)^2 */
   term2 *= -(tau_resid/2.0);
   
   /*Rprintf("term2=%f epsilon=%f tau_resid=%f\n",term2,epsilon,tau_resid);*/
   
  *gvalue = (-1.0/n)*(term1 + term2);
   /*Rprintf("\n----value of term1 %f %f %f----\n",((storedbl1+storedbl2)*(-1/n)),term2,term3); */
  if(gsl_isnan(*gvalue)){error("\n oops - got an NAN! in g_rv_g_inner_gaus-----\n");}	
  
  return GSL_SUCCESS;
}
Example #16
void RandomNumberGenerator::gaussian_mv(const vector<double> &mean, const vector<vector<double> > &covar, const vector<double> &min, const vector<double> &max, vector<double> &result){
  
  /* multivariate normal distribution random number generator */
  /*
   *	n	dimension of the random vector
   *	mean	vector of means of size n
   *	var	variance matrix of dimension n x n
   *	result	output vector holding a single multivariate normal draw
   */
  int k;
  int n=mean.size();
  gsl_matrix *_covar = gsl_matrix_alloc(covar.size(),covar[0].size());
  gsl_vector *_result = gsl_vector_calloc(mean.size());
  gsl_vector *_mean = gsl_vector_calloc(mean.size());
  result.resize(mean.size());

  for(k=0;k<n;k++){
    for(int j=0;j<n;j++){
      gsl_matrix_set(_covar,k,j,covar[k][j]);
    }
    gsl_vector_set(_mean, k, mean[k]);
  }

  int status = gsl_linalg_cholesky_decomp(_covar);
  if(status){
    printf("ERROR: Covariance matrix appears to be un-invertible. Increase your convergence step length to better sample the posterior such that you have enough samples to create a non-singular matrix at first matrix update.\nExiting...\n");
    exit(1);
  }

  bool in_range;
  do{
    for(k=0; k<n; k++)
      gsl_vector_set( _result, k, gsl_ran_ugaussian(r) );
    
    gsl_blas_dtrmv(CblasLower, CblasNoTrans, CblasNonUnit, _covar, _result);
    gsl_vector_add(_result,_mean);
    
    in_range = true;
    for(k=0; k<n; k++){
      if(gsl_vector_get(_result, k) < min[k] or gsl_vector_get(_result, k) > max[k]){
	in_range = false;
	k=n+1;
      }
    }
  }while(not in_range);

  for(k=0; k<n; k++){
    result[k] = gsl_vector_get(_result, k);
  }

  gsl_matrix_free(_covar);
  gsl_vector_free(_result);
  gsl_vector_free(_mean);
  
  return;
}
Example #17
/** ***************************************************************************************/
int rv_dg_inner_gaus (const gsl_vector *epsilonvec, void *params, gsl_vector *dgvalues)
{

   /*double epsilon=0.3; */
   double epsilon=gsl_vector_get(epsilonvec,0);
   const gsl_vector *Y = ((struct fnparams *) params)->Y;/** response variable **/
   const gsl_matrix *X = ((struct fnparams *) params)->X;/** design matrix INC epsilon col **/    
   const gsl_vector *beta = ((struct fnparams *) params)->beta;/** fixed covariate and precision terms **/
   gsl_vector *vectmp1 = ((struct fnparams *) params)->vectmp1;
   gsl_vector *vectmp1long = ((struct fnparams *) params)->vectmp1long;
   gsl_vector *vectmp2long = ((struct fnparams *) params)->vectmp2long;
   
   double tau_rv = gsl_vector_get(beta,beta->size-2);/** inc the precision terms - second last entries */
   double tau_resid = gsl_vector_get(beta,beta->size-1);/** last entry - residual precision */
   double n = (double)(Y->size);/** number of observations */
   int i;
   
   double term3,term2;
        
   term3 = (tau_rv*epsilon)/n;/** correct sign */
  
   /** now for the more complex term */
   /** the design matrix does not include precisions but does include epsilon, beta includes precisions but not epsilon. To use matrix operations
       we make a copy of beta and replace one precision value with value for epsilon - copy into vectmp1 */
   for(i=0;i<beta->size-2;i++){gsl_vector_set(vectmp1,i,gsl_vector_get(beta,i));} /** copy **/ 
   gsl_vector_set(vectmp1,beta->size-2,epsilon); /** last entry in vectmp1 is not precision but epsilon **/
   
   /*for(i=0;i<vectmp1->size;i++){Rprintf("=>%f\n",gsl_vector_get(vectmp1,i));} */
     
   /** get X%*%beta where beta = (b0,b1,...,epsilon) and so we get a vector of b0*1+b1*x1i+b2*x2i+epsilon*1 for each obs i */
    gsl_blas_dgemv (CblasNoTrans, 1.0, X, vectmp1, 0.0, vectmp1long);/** vectmp1long hold X%*%vectmp1 = X%*%mybeta **/  
    /*for(i=0;i<vectmp1long->size;i++){Rprintf("=%f\n",gsl_vector_get(vectmp1long,i));}*/
    
    /*Rprintf("---\n");for(i=0;i<X->size1;i++){for(j=0;j<X->size2;j++){Rprintf("%f ",gsl_matrix_get(X,i,j));}Rprintf("\n");}Rprintf("---\n");*/

   /*for(i=0;i<vectmp2long->size;i++){Rprintf(">%f\n",gsl_vector_get(vectmp2long,i));}*/
   
   gsl_vector_scale(vectmp1long,-1.0);/** multiply each entry by -1 **/
   gsl_vector_memcpy(vectmp2long,Y);/** vectmp2long becomes Y **/
   gsl_vector_add(vectmp2long,vectmp1long);/** vectmp2long becomes Y-XB **/
   gsl_vector_set_all(vectmp1long,1.0);/** reset each value to unity **/
   gsl_blas_ddot (vectmp2long, vectmp1long, &term2);/** just to get the sum of vectmp2long */
   
  /* Rprintf("analytical solution=%f\n",(tau_resid*term2)/(tau_resid*n + tau_rv));*/
   
   /** This derivative can be solved analytically so no need to return value of d_g/d_epsilon 
   term2 *= -tau_resid/n; 
   
     gsl_vector_set(dgvalues,0,term2+term3); 
     if(gsl_isnan(gsl_vector_get(dgvalues,0))){error("rv_dg_inner is nan %f %f %f\n",term2,term3);}
   **/
   
   gsl_vector_set(dgvalues,0,(tau_resid*term2)/(tau_resid*n + tau_rv)); /** solves dg/d_epsilon=0 */
   
 return GSL_SUCCESS;  
}
Example #18
 /** Addition operator (vector) */
 vector<double> vector<double>::operator+(const vector<double>& v)
 {
   vector<double> v1(_vector);
   if (gsl_vector_add(v1.as_gsl_type_ptr(), v.as_gsl_type_ptr()))
     {
       std::cout << "\n Error in vector<double> +" << std::endl;
       exit(EXIT_FAILURE);
     }
   return v1;
 }
Example #19
static void at7_q_sample(mcmclib_mh_q* q, gsl_vector* x) {
  at7_gamma* gamma = (at7_gamma*) q->gamma;
  at7_components_weights(gamma, x);
  const size_t k = sample(q->r, gamma->weights);
  gsl_vector* oldx = gsl_vector_alloc(x->size);
  gsl_vector_memcpy(oldx, x);
  mcmclib_mvnorm(q->r, gamma->qVariances[k], x);
  gsl_vector_add(x, oldx);
  gsl_vector_free(oldx);
}
Example #20
void chapeau_sum ( chapeau * ch1, chapeau * ch2 ) {
  //Overwrite ch1 with ch1+ch2
  int i,j,k;

  if (ch1->N!=ch2->N || ch1->m!=ch2->m) {
    fprintf(stderr,"CFACV/C) ERROR: you can not sum chapeau objects with different sizes\n");
    exit(-1);
  }
  if (ch1->rmin!=ch2->rmin || ch1->rmax!=ch2->rmax || ch1->dr!=ch2->dr ) {
    fprintf(stderr,"CFACV/C) ERROR: you can not sum chapeau objects with different sizes\n");
    exit(-1);
  }

  for (i=0;i<ch1->N;i++) {
    if ( ch1->mask[i]!=ch2->mask[i] ) {
      fprintf(stderr,"CFACV/C) ERROR: you can not sum chapeau objects with different masks\n");
      exit(-1);
    }
  }    

  gsl_vector_add(ch1->b   ,ch2->b);
  gsl_matrix_add(ch1->A   ,ch2->A);
  gsl_vector_add(ch1->bfull,ch2->bfull);
  gsl_matrix_add(ch1->Afull,ch2->Afull);

  for (i=0;i<ch1->m;i++) {
    ch1->hits[i] = (ch1->hits[i]||ch2->hits[i]);
  }

  //// I think that the next is irrelevant since I am saving the state before
  //// the chapeau_output and therefore before the chapeau_update, but I let this
  //// here just for the case.
  //for (i=0;i<ch1->N;i++) {
  //  for (j=0;j<3;j++) {
  //    for (k=0;k<ch1->m;k++) {
  //      ch1->s[i][j][k] = ch1->s[i][j][k] + ch2->s[i][j][k];
  //    }
  //  }
  //}
  //gsl_vector_add(ch1->lam ,ch2->lam); //also irrelevant

}
Example #21
GslVector&
GslVector::operator+=(const GslVector& rhs)
{
  int iRC;
  iRC = gsl_vector_add(m_vec,rhs.m_vec);
  UQ_FATAL_RC_MACRO(iRC,
                    m_env.worldRank(),
                    "GslVector::operator+=()",
                    "failed");
  return *this;
}
Example #22
void ispc_update_bias_helper(par* p, gsl_vector** dbiases, int i, int nrow, double mu, double lambda, double step){
  gsl_vector* res = gsl_vector_alloc(nrow);
  updatebias_ispc((*(p->biases[i])).data,
                  (*(dbiases[i])).data,
                  (*(p->biases_momentum[i])).data,
                  (*res).data,
                  mu, lambda, step, nrow);
  gsl_vector_memcpy(p->biases_momentum[i], res);
  gsl_vector_free(res);

  gsl_vector_add(p->biases[i], p->biases_momentum[i]);
}
Example #23
static void find_nearest_point(gsl_vector *V, double k, gsl_vector *B, gsl_vector *out){
    /* Find X such that BX =K and there is an S such that X + SB=V. */
    double S=0; //S = (BV-K)/B'B.
    gsl_blas_ddot(B, V, &S);
    S   -= k;
assert(!gsl_isnan(S));
    S   /= magnitude(B);
assert(!gsl_isnan(S));
    gsl_vector_memcpy(out, B); //X = -SB +V
    gsl_vector_scale(out, -S);
    gsl_vector_add(out, V);
assert(!gsl_isnan(gsl_vector_get(out,0)));
}
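Restating the in-code comments as a formula: the point on the hyperplane $\{x : B^\top x = k\}$ nearest to $V$ is

$$X = V - \frac{B^\top V - k}{B^\top B}\,B,$$

so the magnitude() helper (not shown here) is presumably the squared norm $B^\top B$, as the S = (BV-K)/B'B comment indicates.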
Example #24
/**
 * Integrate one step forward for a given vector field and state
 * using the Runge-Kutta 4 scheme.
 * \param[in]     field        Vector field to evaluate.
 * \param[in,out] currentState Current state to update by one time step.
 */
void
RungeKutta4::stepForward(vectorField *field, gsl_vector *currentState)
{
  /** Use views on a working matrix to avoid allocating memory
   *  at each time step */
  gsl_vector_view k1, k2, k3, k4, tmp; 

  // Assign views
  tmp = gsl_matrix_row(work, 0);
  k1 = gsl_matrix_row(work, 1);
  k2 = gsl_matrix_row(work, 2);
  k3 = gsl_matrix_row(work, 3);
  k4 = gsl_matrix_row(work, 4);
  
  // First increment
  field->evalField(currentState, &k1.vector);
  gsl_vector_scale(&k1.vector, dt);
  
  gsl_vector_memcpy(&tmp.vector, &k1.vector);
  gsl_vector_scale(&tmp.vector, 0.5);
  gsl_vector_add(&tmp.vector, currentState);

  // Second increment
  field->evalField(&tmp.vector, &k2.vector);
  gsl_vector_scale(&k2.vector, dt);
  
  gsl_vector_memcpy(&tmp.vector, &k2.vector);
  gsl_vector_scale(&tmp.vector, 0.5);
  gsl_vector_add(&tmp.vector, currentState);

  // Third increment
  field->evalField(&tmp.vector, &k3.vector);
  gsl_vector_scale(&k3.vector, dt);
  
  gsl_vector_memcpy(&tmp.vector, &k3.vector);
  gsl_vector_add(&tmp.vector, currentState);

  // Fourth increment
  field->evalField(&tmp.vector, &k4.vector);
  gsl_vector_scale(&k4.vector, dt);

  gsl_vector_scale(&k2.vector, 2);
  gsl_vector_scale(&k3.vector, 2);
  gsl_vector_memcpy(&tmp.vector, &k1.vector);
  gsl_vector_add(&tmp.vector, &k2.vector);
  gsl_vector_add(&tmp.vector, &k3.vector);
  gsl_vector_add(&tmp.vector, &k4.vector);
  gsl_vector_scale(&tmp.vector, 1. / 6);

  // Update state
  gsl_vector_add(currentState, &tmp.vector);

  return;
}
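The final block combines the four increments with the standard RK4 weights before updating the state:

$$x_{n+1} = x_n + \tfrac{1}{6}\bigl(k_1 + 2k_2 + 2k_3 + k_4\bigr), \qquad k_i = \Delta t \, f(\,\cdot\,).$$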
Example #25
gsl_vector* determineMeanSqu(gsl_vector* object, int length, gsl_vector* meanSqu)
{
  gsl_vector* meanSqutemp = gsl_vector_calloc(length);

  gsl_vector_memcpy(meanSqutemp, object);
  gsl_vector_mul(meanSqutemp, object);

  gsl_vector_add(meanSqu, meanSqutemp);

  gsl_vector_free(meanSqutemp);

  return 0;
}
Example #26
void momentum_update(par* p, gsl_vector** biases, gsl_matrix** weights, double step)
{
  double mu = p->contract_momentum;
  for (int i = 0; i < p->num_layers-1; i++){
    //gsl_vector_scale(p->biases_momentum[i], mu);
    //gsl_blas_daxpy(step, biases[i], p->biases_momentum[i]);
    gsl_vector_add(p->biases[i], p->biases_momentum[i]);

    gsl_matrix_scale(p->weights_momentum[i], mu);
    gsl_matrix_scale(weights[i],step);
    gsl_matrix_add(p->weights_momentum[i],weights[i]);
    gsl_matrix_add(p->weights[i], p->weights_momentum[i]);
  }
}
Example #27
void update_gradients(par_c* q, par*p)
{
  gsl_matrix* delta_temp;
  gsl_matrix* a_temp;

  for (int l = 0; l < p->num_layers - 1; l++){
    gsl_vector_add(q->gradient_biases[l],q->delta[l]);
    vec_to_mat(q->delta[l], &delta_temp);
    vec_to_mat(q->transf_x[l], &a_temp);
    gsl_blas_dgemm(CblasTrans,CblasNoTrans,1,delta_temp, a_temp,1,q->gradient_weights[l]);
    gsl_matrix_free(delta_temp);
    gsl_matrix_free(a_temp);
  }  
}
Example #28
static CVECTOR *_add(CVECTOR *a, CVECTOR *b, bool invert)
{
	CVECTOR *v = VECTOR_make(a);
	
	if (COMPLEX(v) || COMPLEX(b))
	{
		VECTOR_ensure_complex(v);
		VECTOR_ensure_complex(b);
		gsl_vector_complex_add(CVEC(v), CVEC(b));
	}
	else
		gsl_vector_add(VEC(v), VEC(b));
	
	return v;
}
Example #29
/*******************************************************************************
 * clusterupdate: Runs the Wolff algorithm on lattice.
 ******************************************************************************/
int
clusterupdate(lattice_site * lattice, settings conf, double beta)
{
  int i,j,cluster_size, update_start;
  gsl_vector * base, * delta;
  int * update_list;
  int ** bonds;
  double scale;

  base  = gsl_vector_alloc(conf.spindims); /* Direction to switch over */
  delta = gsl_vector_alloc(conf.spindims); /* How much to change a vector */
  update_list = (int *) calloc(conf.elements,sizeof(int));   /* List of Lattice points to switch */
  bonds       = (int **) malloc(conf.elements*sizeof(int*)); /* Bonds Matrix to make sure bonds aren't rechecked. */

  for(i = 0 ; i < conf.elements ; i++)
    bonds[i]  = (int *) calloc(conf.elements,sizeof(int));

  /* Generate a random unit vector to set as base */
  unit_vec(base,conf.rng);

  //Pick a random point on the lattice then pass off to gencluster
  update_start = gsl_rng_uniform_int(conf.rng,conf.elements);
  update_list[update_start] = 1;
  
  //Pass off to gencluster
  cluster_size = gencluster(lattice,conf,update_start,update_list,bonds,base,beta);
  cluster_size++; //So it will include the first element
  
  //Flip the entire cluster
  for(j = 0 ; j < conf.elements ; j++)
  {
    if(update_list[j] == 1)
    {
      gsl_blas_ddot(lattice[j].spin,base,&scale);
      gsl_vector_memcpy(delta,base);
      gsl_vector_scale(delta,-2.0*scale);
      gsl_vector_add(lattice[j].spin,delta);
    }
  }

  gsl_vector_free(base);
  gsl_vector_free(delta);
  free(update_list);
  for(i = 0 ; i < conf.elements ; i++)
    free(bonds[i]);
  free(bonds);
  return(cluster_size);
}
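The flip applied to each spin in the cluster is a reflection through the hyperplane orthogonal to the random unit vector base, which is what the ddot/scale/add sequence implements:

$$\mathbf{s}_j \leftarrow \mathbf{s}_j - 2\,(\mathbf{s}_j \cdot \hat{\mathbf{r}})\,\hat{\mathbf{r}}.$$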
Example #30
/**
 * Integrate one step forward for a given vector field and state
 * using the Euler scheme.
 * \param[in]     field        Vector field to evaluate.
 * \param[in,out] currentState Current state to update by one time step.
 */
void
Euler::stepForward(vectorField *field, gsl_vector *currentState)
{
  gsl_vector_view tmp = gsl_matrix_row(work, 0); 

  // Get vector field
  field->evalField(currentState, &tmp.vector);
  
  // Scale by time step
  gsl_vector_scale(&tmp.vector, dt);

  // Add previous state
  gsl_vector_add(currentState, &tmp.vector);

  return;
}