Example #1
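/* Slice-sample each beta[j] under a Poisson likelihood: the two likelihood
   factors give an initial (lower, upper) interval for beta[j], every row of
   Dk with a non-zero entry in column j then tightens it so that
   |Dk[row].beta| <= s[row], and beta[j] is finally drawn uniformly from the
   resulting interval. */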
void sample_likelihood_poisson(const gsl_rng *random,
                                int n, int *obs, 
                                int *dk_rowbreaks, int *dk_cols, double *dk_vals,
                                double *s, int **coefs, int *coef_breaks,
                                double *beta)
{
    int i;
    int j;
    int k;
    int row;
    int j_idx;
    double a;
    double lower;
    double upper;
    double left;
    double right;
    
    for(j = 0; j < n; j++)
    {
        if (obs[j] > 0){
            lower = lex_ran_flat(random, 0, gsl_sf_exp(obs[j]*beta[j]));
            lower = gsl_sf_log(lower) / (double)obs[j];
        } else {
            lower = -INFINITY;
        }
        upper = lex_ran_flat(random, 0, beta[j] < -160 ? 1 : gsl_sf_exp(-gsl_sf_exp(beta[j])));
        upper = gsl_sf_log(-gsl_sf_log(upper));
        /* Bound the sampling range */
        for (i = 0; i < coef_breaks[j]; i++)
        {
            /* current row that has a non-zero value for column j */
            row = coefs[j][i];
            
            /* Calculate Dk[i].dot(b_notj), the inner product of the i'th row
               and the beta vector, excluding the j'th column. */
            a = 0;
            for (k = row == 0 ? 0 : dk_rowbreaks[row-1]; k < dk_rowbreaks[row]; k++){
                if (dk_cols[k] == j){
                    j_idx = k;
                } else{
                    a += dk_vals[k] * beta[dk_cols[k]];
                }
            }
            
            /* Find the left and right bounds */
            left = (-s[row] - a) / dk_vals[j_idx];
            right = (s[row] - a) / dk_vals[j_idx];
            if (dk_vals[j_idx] >= 0){
                lower = MAX(lower, left);
                upper = MIN(upper, right);
            } else {
                lower = MAX(lower, right);
                upper = MIN(upper, left);
            }
        }
        beta[j] = gsl_ran_flat(random, lower, upper);
    }
}
Example #2
/* compute ln(a + b) safely, given ln(a) and ln(b). */
double safe_sum(double ln_a, double ln_b)
{
    if (isnormal(ln_a) && isnormal(ln_b))
        return ln_a < ln_b 
            ? ln_b + gsl_sf_log(gsl_sf_exp(ln_a - ln_b) + 1.0)
            : ln_a + gsl_sf_log(gsl_sf_exp(ln_b - ln_a) + 1.0);
    else if (isnormal(ln_a))
        return ln_a;
    else 
        return ln_b;
}
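safe_sum relies on the usual log-sum-exp identity, ln(a+b) = ln(b) + ln(1 + exp(ln a - ln b)) for ln(a) <= ln(b). A minimal standalone check of that identity with plain libm calls (this snippet is illustrative and not part of the original project):

#include <math.h>
#include <stdio.h>

/* illustrative re-implementation of the identity used by safe_sum(),
   with log1p/exp from <math.h> instead of the GSL wrappers */
static double ln_sum(double ln_a, double ln_b)
{
    if (ln_a > ln_b) { double t = ln_a; ln_a = ln_b; ln_b = t; }
    return ln_b + log1p(exp(ln_a - ln_b));
}

int main(void)
{
    /* ln(2) combined with ln(3) in log space should give ln(5) */
    printf("ln_sum = %.15f, log(5) = %.15f\n",
           ln_sum(log(2.0), log(3.0)), log(5.0));
    return 0;
}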
// -----------------------------------------------------------------
static double error2(double x, void *pars) {
  error2_pars_type *p = (error2_pars_type *)pars;
  double al = p->alpha;
  double hv = h(x, p->epsilon, p->order, p->c);

  // A**B = exp[B log A]
  double num_pow = al + 1.0,         num_arg = 1.0 + x;
  double den_pow = (3.0 + al) / 2.0, den_arg = 1.0 - (x * x);
  double num = gsl_sf_exp(num_pow * gsl_sf_log(num_arg));
  double den = gsl_sf_exp(den_pow * gsl_sf_log(den_arg));
  return (hv * hv * hv * hv * num / den);
}
Example #4
/* normalize densities.  after this, sum(1/d) should be 1, and sum(o)
   should be the same for any distribution */
void normalize_points(struct wpoint *points, unsigned npoints,
                      double d_norm, double o_norm)
{
    struct wpoint *s, *se = points + npoints;
    double ln_d_norm = gsl_sf_log(d_norm);
    double ln_o_norm =  gsl_sf_log(o_norm);
    for (s = points; s != se; ++s)
    {
        s->ln_d -= ln_d_norm;
        s->ln_o -= ln_o_norm;
        s->d /= d_norm;
        s->o /= o_norm;
    }
}
Example #5
/* Calculates the normalisation constant of the likelihood function:
 * the product over all data points of 1/sqrt(2*pi*sigma²)^beta.
 * The calculation is done in log space.
 */
int /*error flag*/
pdf_normalisation_constant(ode_model_parameters *omp)/*pre-allocated storage for simulation results, used in LogLikelihood calculations*/{
  assert(omp && omp->size);
  int c,C=get_number_of_experimental_conditions(omp);
  int i,F=get_number_of_model_outputs(omp);
  int t,T;
  double E_lN,lN=0; // log normalisation constant;
  double stdv;
  for (c=0;c<C;c++){
    T=omp->E[c]->t->size;
    E_lN=-0.5*(M_LN2+M_LNPI);
    for (t=0;t<T;t++){
      for (i=0;i<F;i++){
	stdv=gsl_vector_get(omp->E[c]->sd_data[t],i);
	if (gsl_finite(stdv)){
	  E_lN-=gsl_sf_log(stdv);
	}
      }
    }
    omp->E[c]->pdf_lognorm=E_lN;
    lN+=E_lN;
  }
  omp->pdf_lognorm=lN;
  return EXIT_SUCCESS;
}
Example #6
double MCMC_base(gsl_rng *RNG, struct_data *D,struct_para *D_para,struct_priors *D_priors,double *accept,double *h,double para,double (*foo)(struct struct_data *D,struct struct_para *D_para,struct struct_priors *D_priors,double,int,int,int),int c,int l, int m){
	double logu,logaprob,can;
	/* propose a candidate with a Gaussian random walk of step size *h */
	can=para+gsl_ran_gaussian(RNG,*h);
	/* log acceptance ratio: log density at the candidate minus log density at the current value */
	logaprob=(*foo)(D,D_para,D_priors,can,c,l,m)-(*foo)(D,D_para,D_priors,para,c,l,m);

	/* compare against log(u) with u uniform on (0,1]; 1-gsl_rng_uniform() avoids log(0) */
	logu=gsl_sf_log(1-gsl_rng_uniform(RNG));
	if (logaprob>logu){para=can;*accept=*accept+1;}
	return(para);
}
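MCMC_base is a single Metropolis step: a Gaussian random-walk proposal, a log acceptance ratio from the user-supplied log density, and a comparison against log(u). A self-contained sketch of the same pattern with GSL, targeting a standard normal; the target, step size and chain length here are illustrative assumptions, not taken from the original code:

#include <math.h>
#include <stdio.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>

/* illustrative target: unnormalised log density of N(0,1) */
static double log_target(double x) { return -0.5 * x * x; }

int main(void)
{
    gsl_rng_env_setup();
    gsl_rng *RNG = gsl_rng_alloc(gsl_rng_default);
    double x = 0.0, h = 1.0, accept = 0.0;
    long i, n = 100000;

    for (i = 0; i < n; i++) {
        double can = x + gsl_ran_gaussian(RNG, h);          /* random-walk proposal */
        double logaprob = log_target(can) - log_target(x);  /* log acceptance ratio */
        double logu = log(1.0 - gsl_rng_uniform(RNG));      /* 1-u keeps the argument in (0,1] */
        if (logaprob > logu) { x = can; accept += 1.0; }    /* accept, else keep current x */
    }
    printf("acceptance rate: %g\n", accept / (double)n);
    gsl_rng_free(RNG);
    return 0;
}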
Example #7
/* find ln_thresh such that all points with ln_o < ln_thresh are
   'smooth' */
double smooth_threshold(struct wpoint **points, double mult, unsigned *npoints)
{
    unsigned *n = npoints;
    struct wpoint **pp, **pe = points + *n;
    double ln_total = NAN;
    for (pp = points; pp != pe; ++pp)
        ln_total = safe_sum(ln_total, (*pp)->ln_o);

    qsort(points, *n, sizeof(struct wpoint *), ocmp);
    struct wpoint query, *pq = &query, **lb;
    
    /* the log form of mult * avg_over_n(o) */
    query.ln_o = gsl_sf_log(mult) + ln_total - gsl_sf_log(*n);

    lb = lower_bound(points, *n, sizeof(struct wpoint *), ocmp, &pq);
    *n = lb - points;
    
    return lb == pe ? DBL_MAX : (*lb)->ln_o;
}
Example #8
double washout_integrand (double x,  void * p) {
	struct washoutParams * params  = (struct washoutParams *)p;
	double M = params->M;
	double T = params->T;
	double a=1.0;

	double top =exp(a*a*M*M/(4*T*T*x)+x)+1;
	double bottom =exp(a*a*M*M/(4*T*T*x)+x)-exp(x);

	return  exp(x)/((exp(x)+1.0)*(exp(x)+1.0))*gsl_sf_log(top/bottom);
}
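Functions with this double f(double x, void *params) shape plug directly into GSL's gsl_function interface. A sketch of integrating washout_integrand with an adaptive GSL routine follows; the parameter values and the finite integration range are illustrative assumptions (chosen so the exp() calls stay in range), not taken from the original code:

#include <stdio.h>
#include <gsl/gsl_integration.h>

struct washoutParams { double M; double T; };   /* the two fields read above (assumed layout) */
double washout_integrand(double x, void *p);    /* defined above */

int main(void)
{
    struct washoutParams pars = { 1.0, 1.0 };   /* illustrative M and T */
    gsl_integration_workspace *w = gsl_integration_workspace_alloc(1000);
    gsl_function F = { &washout_integrand, &pars };
    double result, abserr;

    /* finite range picked for illustration; the integrand is negligible outside it
       for these parameter values */
    gsl_integration_qags(&F, 0.01, 30.0, 0.0, 1e-7, 1000, w, &result, &abserr);
    printf("integral = %g (est. error %g)\n", result, abserr);

    gsl_integration_workspace_free(w);
    return 0;
}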
Example #9
/* 
 *      FUNCTION  
 *         Name:  entropy_production
 *  Description:  entropy production of the state rho relative to the
 *                equilibrium state rhoeq, given the generator matrix L
 */
double entropy_production ( const gsl_vector* rho, const gsl_vector* rhoeq,
		const gsl_matrix* L  )
{
	/* l1, l2, l3 */
	double l[3] ; unsigned int i, j ;
	for ( i = 1 ; i < 3 ; i++ )
	{
		l[i] = 0 ;
		for ( j = 0 ; j < 3 ; j++ )
			l[i] += gsl_matrix_get(L,i,j)*VECTOR(rho,j) ;
	}	

	/* L[rho] */
	double Lr = 0 ;
	for ( i = 1 ; i < 3 ; i++ )
		Lr += l[i]*VECTOR(rho,i) ;

	/* L[rhoeq] */
	double Leq = 0 ;
	for ( i = 1 ; i < 3 ; i++ )
		Leq += l[i]*VECTOR(rhoeq,i) ;

	/* r , req */
	double r, req ;
	r = req = 0 ;

	r = gsl_hypot3(VECTOR(rho,1),VECTOR(rho,2),VECTOR(rho,3)) ;
	req = gsl_hypot3(VECTOR(rhoeq,1),VECTOR(rhoeq,2),
		VECTOR(rhoeq,3)) ;

	/* internal entropy s */
	double s ;
	if ( r < 1 && req < 1 )
		s = -(gsl_sf_log((1+r)/(1-r))*Lr/r -gsl_sf_log((1+req)/(1-req))*Leq/req);
	else if ( r < 1 && req >= 0 )
		s = -gsl_sf_log((1+r)/(1-r))*Lr/r ;
	else
		s = 0 ;

	return s;
}		/* -----  end of function entropy_production  ----- */
scalar sasfit_peak_loglogistic_amplitude(scalar x, sasfit_param * param)
{
	scalar a0,z;
	SASFIT_ASSERT_PTR( param );

	SASFIT_CHECK_COND1((SIGMA <= 0), param,  "sigma(%lg) <= 0 ",SIGMA);
	SASFIT_CHECK_COND1((SIGMA >= 1), param,  "sigma(%lg) >= 1 ",SIGMA);

	if ((x<=LOCATION) || fabs(gsl_sf_log((x-LOCATION)/(MU)))/SIGMA > 100) return BCKGR;

	
	MODE = (MU+LOCATION)*pow((1.0-SIGMA)/(1.0+SIGMA),SIGMA);
	SASFIT_CHECK_COND2((MODE <= LOCATION), param,  "mode(%lg) <= location(%lg) ",MODE,LOCATION);

	z = gsl_sf_log((MODE-LOCATION)/(MU))/SIGMA;
	a0 = exp(-z)/pow(1.0+exp(-z),2.0) / (MODE-LOCATION);

	z = gsl_sf_log((x-LOCATION)/(MU))/SIGMA;
	return BCKGR
			+ AMPL/a0 * exp(-z)/pow(1.0+exp(-z),2.0) / (x-LOCATION);
}
Example #11
int fillpriors(struct_priors *D_priors)
{
	/*Priors*/
	/*K*/
	D_priors->sigma_K=1;               D_priors->phi_K=1.0/(4.0*4.0);       /*Gamma  Shape; Scale */
	D_priors->eta_K_p=1;               D_priors->psi_K_o=1;             /*Gamma  Shape; Scale */
	/*r*/
	D_priors->sigma_r=1;               D_priors->phi_r=1.0/(4.0*4.0);       /*Gamma  Shape; Scale */
	D_priors->eta_r_p=1;               D_priors->psi_r_o=1;             /*Gamma  Shape; Scale */
	/*nu*/
	D_priors->eta_nu_p=1;              D_priors->psi_nu=1;              /*Gamma  Shape; Scale */

	/*K*//*r*//*nu*//*P*/
	D_priors->K_mu=gsl_sf_log(0.2192928);      D_priors->eta_K_mu=1.0/(3.0*3.0);      /*Normal  LMean; Precisions */
	D_priors->r_mu=gsl_sf_log(2.5);            D_priors->eta_r_mu=1.0/(4.0*4.0);      /*Normal  LMean; Precisions */
	D_priors->nu_mu=gsl_sf_log(31);            D_priors->eta_nu_mu=1;     /*Normal  LMean; Precisions */
	D_priors->P_mu=gsl_sf_log(0.0001);         D_priors->eta_P_mu=1;   /*Normal  LMean; Precisions */
	/*data2.c*/       

	D_priors->alpha_mu=gsl_sf_log(1);          D_priors->eta_alpha=1.0/(3.0*3.0);
	D_priors->beta_mu=gsl_sf_log(1);           D_priors->eta_beta=1.0/(4.0*4.0);
	D_priors->p=0.1;    
	D_priors->eta_gamma=1;           	   D_priors->psi_gamma=1;
	D_priors->eta_omega=1;           	   D_priors->psi_omega=1;
	D_priors->eta_upsilon=1;	 	   D_priors->phi_upsilon=1;	    
	D_priors->upsilon_mu=1;			   D_priors->eta_upsilon_mu=1;


return 0;
}
Example #12
void sample_prior_aux_laplace_multilambda(const gsl_rng *random, double *beta,
                                int dk_rows, int *dk_rowbreaks, int *dk_cols, double *deltak,
                                double *lambda, double *s)
{
    int i;

    mat_dot_vec(dk_rows, dk_rowbreaks, dk_cols, deltak, beta, s);
    vec_abs(dk_rows, s);

    for(i = 0; i < dk_rows; i++){
        s[i] = -gsl_sf_log(lex_ran_flat (random, 0.0, gsl_sf_exp(-lambda[i] * s[i]))) / lambda[i];
    }
}
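The update above is an inverse-CDF draw: with u uniform on (0, exp(-lambda*s)], the value -ln(u)/lambda is an Exponential(lambda) variate truncated to [s, inf). A small standalone sketch of that one-liner; lex_ran_flat is project-specific, so this uses the plain GSL uniform generator and keeps the log argument strictly positive:

#include <math.h>
#include <stdio.h>
#include <gsl/gsl_rng.h>

/* Draw from Exponential(lambda) truncated to [a, inf) by inverse-CDF sampling,
 * mirroring the s[i] update above.  (1 - uniform) keeps u in (0, 1] so the
 * argument of log() never hits zero. */
static double ran_exp_lower_truncated(gsl_rng *rng, double lambda, double a)
{
    double u = (1.0 - gsl_rng_uniform(rng)) * exp(-lambda * a);
    return -log(u) / lambda;
}

int main(void)
{
    gsl_rng_env_setup();
    gsl_rng *rng = gsl_rng_alloc(gsl_rng_default);
    int i;
    for (i = 0; i < 5; i++)
        printf("%g\n", ran_exp_lower_truncated(rng, 2.0, 1.5));  /* all values >= 1.5 */
    gsl_rng_free(rng);
    return 0;
}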
Example #13
bool GillespieSimulator::__draw_next_reaction(void)
{
    std::vector<double> a(events_.size());
    // const Real V(world_->volume());
    for (unsigned int idx(0); idx < events_.size(); ++idx)
    {
        // events_[idx].initialize(world_.get());
        a[idx] = events_[idx].propensity();
    }

    const double atot(std::accumulate(a.begin(), a.end(), double(0.0)));
    if (atot == 0.0)
    {
        // No reaction can occur.
        this->dt_ = inf;
        return true;
    }

    const double rnd1(rng()->uniform(0, 1));
    const double dt(gsl_sf_log(1.0 / rnd1) / double(atot));
    const double rnd2(rng()->uniform(0, atot));

    int u(-1);
    double acc(0.0);
    const int len_a(a.size());
    do
    {
        u++;
        acc += a[u];
    } while (acc < rnd2 && u < len_a - 1);

    if (len_a == u)
    {
        // No reaction can occur.
        this->dt_ = inf;
        return true;
    }

    next_reaction_rule_ = events_[u].reaction_rule();
    next_reaction_ = events_[u].draw();
    if (next_reaction_.k() <= 0.0)
    {
        this->dt_ += dt; // skip a reaction
        return false;
    }

    this->dt_ += dt;
    return true;
}
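For reference, the two draws above (an Exp(a_tot) waiting time and a propensity-weighted choice of reaction) condense into a small standalone helper; the function name, the propensity array and the GSL generator here are illustrative and not part of the simulator class:

#include <math.h>
#include <stdio.h>
#include <gsl/gsl_rng.h>

/* Gillespie direct-method draw: writes the Exp(atot) waiting time to *dt and
 * returns the index of the chosen reaction, or -1 if every propensity is zero. */
static int gillespie_draw(gsl_rng *rng, const double *a, int n, double *dt)
{
    double atot = 0.0, acc, r2;
    int u;

    for (u = 0; u < n; u++) atot += a[u];
    if (atot <= 0.0) { *dt = INFINITY; return -1; }

    /* waiting time; (1 - uniform) avoids log(0) since gsl_rng_uniform can return 0 */
    *dt = -log(1.0 - gsl_rng_uniform(rng)) / atot;

    /* choose reaction u with probability a[u] / atot */
    r2 = gsl_rng_uniform(rng) * atot;
    u = 0;
    acc = a[0];
    while (acc < r2 && u < n - 1) { u++; acc += a[u]; }
    return u;
}

int main(void)
{
    double a[3] = { 0.5, 1.5, 2.0 };   /* illustrative propensities */
    double dt;
    gsl_rng_env_setup();
    gsl_rng *rng = gsl_rng_alloc(gsl_rng_default);
    int u = gillespie_draw(rng, a, 3, &dt);
    printf("reaction %d after dt = %g\n", u, dt);
    gsl_rng_free(rng);
    return 0;
}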
Example #14
int fillpara(struct_para *D_para, struct_data *D,struct_priors *D_priors)
{
  int l,m,mm;
  double SUM=0,SUMa=0;
  /*initials*/
  /*K*/
  for (l=0;l<D->L;l++){
    for (m=0;m<D->NoORF[l];m++){
      mm=D->NoSUM[l]+m;
      if(D->y[l*D->M*D->N + m*D->N + D->NoTIME[mm]-1]<=0){
        D_para->K_lm[mm]=D_priors->P_mu;
      } else {
        D_para->K_lm[mm]=gsl_sf_log(D->y[l*D->M*D->N + m*D->N + D->NoTIME[mm]-1]);
      }
      SUM+=D_para->K_lm[mm];
    }
    D_para->K_o_l[l]=SUM/D->NoORF[l];
    SUM=0;
    SUMa+=D_para->K_o_l[l];
  }
  D_para->K_p=SUMa/D->L;       /*LMean*/

  for (l=0;l<D->L;l++)          {D_para->tau_K_l[l]=D_priors->sigma_K;}                  /*Precision*/

/*LMean*/
  D_para->sigma_K_o=D_priors->eta_K_o;               /*Precision*/

  /*r*/
  for (l=0;l<D->L;l++){
    for (m=0;m<D->NoORF[l];m++){
      mm=D->NoSUM[l]+m;
      D_para->r_lm[mm]=D_priors->r_mu;
    }
  }                          /*LMean*/

  for (l=0;l<D->L;l++)          {D_para->tau_r_l[l]=D_priors->sigma_r;}                  /*Precision*/

  for (l=0;l<D->L;l++)          {D_para->r_o_l[l]=D_priors->r_mu;}        /*LMean*/
  D_para->sigma_r_o=D_priors->eta_r_o;               /*Precision*/

  D_para->r_p=D_priors->r_mu;       /*LMean*/

  /*nu*/
  for (l=0;l<D->L;l++)          {D_para->nu_l[l]=D_priors->nu_mu;}                      /*LMean*/
  D_para->sigma_nu=D_priors->eta_nu;   /*Precision for lMean*/

  D_para->nu_p=D_priors->nu_mu;   /*LMean*/
  /*P*/
  D_para->P=D_priors->P_mu;      /*LMean*/
  return 0;
}
Example #15
double sample_lambda_cauchy(const gsl_rng *random, double *beta,
                                  int dk_rows, int *dk_rowbreaks, int *dk_cols, double *dk_vals,
                                  double a, double b,
                                  double lam0, double lam_walk_stdev)
{
    int i;
    double lam1;
    double sum_term;
    double dotprod;
    double accept_ratio;
    double log_accept_ratio;
    int prev_break;

    lam1 = gsl_sf_exp(gsl_ran_gaussian(random, lam_walk_stdev) + gsl_sf_log(lam0));

    /* Lambda as an inverse scale parameter */
    sum_term = 0;
    prev_break = 0;
    for (i = 0; i < dk_rows; i++){
        dotprod = fabs(vec_dot_beta(dk_rowbreaks[i] - prev_break, dk_cols + prev_break, dk_vals + prev_break, beta));
        dotprod *= dotprod;
        sum_term += gsl_sf_log(1.0 + lam1 * lam1 * dotprod) - gsl_sf_log(1.0 + lam0 * lam0 * dotprod);
        prev_break = dk_rowbreaks[i];
    }
    log_accept_ratio = (a - 1 + dk_rows) * (gsl_sf_log(lam1) - gsl_sf_log(lam0)) - b * (lam1 - lam0) - sum_term;

    if(log_accept_ratio < -20)
        return lam0;
    else if(log_accept_ratio > 0)
        return lam1;

    accept_ratio = gsl_sf_exp(log_accept_ratio);
    if (gsl_ran_flat(random, 0, 1) <= accept_ratio)
        return lam1;
    return lam0;    
}
scalar sasfit_peak_LogLogisticArea(scalar x, sasfit_param * param)
{
	scalar z ;

	SASFIT_ASSERT_PTR( param );

	SASFIT_CHECK_COND1((SIGMA <= 0), param,  "sigma(%lg) <= 0 ",SIGMA);
	SASFIT_CHECK_COND1((SIGMA >= 1), param,  "sigma(%lg) >= 1 ",SIGMA);

	MODE = (MU+LOCATION)*pow((1.0-SIGMA)/(1.0+SIGMA),SIGMA);
	SASFIT_CHECK_COND2((MODE <= LOCATION), param,  "mode(%lg) <= location(%lg), you should increase the value for mu.",MODE,LOCATION);

	if (x<=LOCATION) return BCKGR;

	z = gsl_sf_log((x-LOCATION)/(MU))/SIGMA;
	return BCKGR
			+ AREA/SIGMA * exp(-z)/pow(1.0+exp(-z),2.0) / (x-LOCATION);
}
Example #17
// print out the CDF given a number of points.
// normalize to the given constant
void print_cdf(struct wpoint **points, unsigned npoints,
               double total_mass,
               struct wpoint *first_point, // needed to give each point an index
               const char *dist_name,
               FILE *cdf_fh)
{
    unsigned dim;
    char fmt[] = "%s\t%c\t%20.18le\t%u\t%20.18le\t%20.18le\t%20.18le\t%s\n";
    char base[] = "ACGT";
    struct wpoint *s, **sp, **se = points + npoints;

    double sum, ln_z = gsl_sf_log(total_mass);

    double ln_sum, max_ln_step = -INFINITY;
    for (dim = 0; dim != NUCS; ++dim)
    {
        cmp_dim = dim;
        qsort(points, npoints, sizeof(struct wpoint *), pcmp5);
        ln_sum = NAN;
        sum = 0;

        for (sp = points; sp != se; ++sp)
        {
            s = *sp;
            max_ln_step = max(max_ln_step, s->ln_o - ln_z);
            /* ln_sum = safe_sum(ln_sum, s->ln_o - gsl_sf_log(s->num_samples)); */
            sum += s->o / (double)s->num_samples;
            fprintf(cdf_fh, fmt, 
                    dist_name,
                    base[dim],
                    s->x[dim],
                    (unsigned)(s - first_point),
                    /* gsl_sf_exp(ln_sum - ln_z), */
                    sum / total_mass,
                    s->ln_d,
                    s->ln_o,
                    s->dist);
        }
    }
    fprintf(stderr, "%s, max_ln_step = %g, max_step = %g\n", dist_name, max_ln_step, gsl_sf_exp(max_ln_step));

}
Example #18
void sample_prior_aux_doublepareto(const gsl_rng *random, double *beta, 
                                   int dk_rows, int *dk_rowbreaks, int *dk_cols, double *deltak,
                                   double lambda, double dp_hyperparameter, double *s)
{
    int i;
    double z;
    double dp_exponent;

    dp_exponent = -(dp_hyperparameter+1);
    mat_dot_vec(dk_rows, dk_rowbreaks, dk_cols, deltak, beta, s);
    vec_abs(dk_rows, s);

    for(i = 0; i < dk_rows; i++){
        /* Lambda as an inverse scale parameter */
        z = pow(1. + lambda * s[i] / dp_hyperparameter, dp_exponent);
        s[i] = dp_hyperparameter / lambda * (gsl_sf_exp(gsl_sf_log(lex_ran_flat(random, 0, z)) / dp_exponent) - 1);
        
        /* Lambda as a scale parameter
        z = pow(1. + s[i] / (dp_hyperparameter * lambda), -dp_hyperparameter - 1.);
        s[i] = dp_hyperparameter * lambda * (gsl_sf_exp(-gsl_sf_log(lex_ran_flat(random, 0, z)) / (dp_hyperparameter + 1.0)) - 1);
        */
    }
}
Example #19
void bayes_gfl_poisson_doublepareto (int n, int *obs,
                                      int dk_rows, int *dk_rowbreaks, int *dk_cols, double *deltak,
                                      double lambda_hyperparam_a, double lambda_hyperparam_b,
                                      double lam_walk_stdev, double lam0, double dp_hyperparameter,
                                      long iterations, long burn, long thin,
                                      double **beta_samples, double *lambda_samples)
{
    int i;
    double *s;
    double *beta;
    int **coefs;
    int *coef_breaks;
    long iteration;
    int sample_idx;
    double ymean;
    const gsl_rng_type *T;
    gsl_rng *random;
    double lambda;

    gsl_rng_env_setup();
    T = gsl_rng_default;
    random = gsl_rng_alloc (T);

    s = (double *) malloc(dk_rows * sizeof(double));
    coefs = (int **) malloc(n * sizeof(int*));
    coef_breaks = (int *) malloc(n * sizeof(int));
    beta = (double *) malloc(n * sizeof(double));
    lambda = lam0;

    /* Cache a lookup table to map from deltak column to the set of rows with 
       non-zero entries for that column */
    calc_coefs(n, dk_rows, dk_rowbreaks, dk_cols, coefs, coef_breaks);

    /* Set all beta values to the mean to start */
    ymean = gsl_sf_log(vec_mean_int(n, obs));
    for (i = 0; i < n; i++){ beta[i] = ymean; }

    /* Run the Gibbs sampler */
    for (iteration = 0, sample_idx = 0; iteration < iterations; iteration++)
    {
        /* Sample the lambda penalty weight on the double Pareto prior */
        lambda = sample_lambda_doublepareto(random, beta,
                                            dk_rows, dk_rowbreaks, dk_cols, deltak,
                                            lambda_hyperparam_a, lambda_hyperparam_b,
                                            lambda, dp_hyperparameter, lam_walk_stdev);

        /* Sample each of the auxiliary variables (one per row of Dk) */
        sample_prior_aux_doublepareto(random, beta, dk_rows, dk_rowbreaks, dk_cols, deltak, lambda, dp_hyperparameter, s);
        
        /* Sample from the Poisson likelihood */
        sample_likelihood_poisson(random, n, obs, dk_rowbreaks, dk_cols, deltak, s, coefs, coef_breaks, beta);
        
        /* Add the sample */
        if (iteration >= burn && (iteration % thin) == 0){
            lambda_samples[sample_idx] = lambda;
            for(i = 0; i < n; i++){ beta_samples[sample_idx][i] = gsl_sf_exp(beta[i]); }
            sample_idx++;
        }
    }

    free(s);
    free(beta);
    for (i = 0; i < n; i++){ free(coefs[i]); }
    free(coefs);
    free(coef_breaks);

    gsl_rng_free(random);
}
Example #20
void empirical_bayes_gfl_binomial_laplace_gamma (int n, int *trials, int *successes,
                                int dk_rows, int *dk_rowbreaks, int *dk_cols, double *deltak,
                                double lambda,
                                long iterations, long burn, long thin,
                                double **beta_samples, double *lambda_samples)
{
    int i;
    double *s;
    double *beta;
    double *tau;
    int **coefs;
    int *coef_breaks;
    long iteration;
    int sample_idx;
    double ymean;
    const gsl_rng_type *T;
    gsl_rng *random;

    gsl_rng_env_setup();
    T = gsl_rng_default;
    random = gsl_rng_alloc (T);

    s = (double *) malloc(dk_rows * sizeof(double));
    coefs = (int **) malloc(n * sizeof(int*));
    coef_breaks = (int *) malloc(n * sizeof(int));
    beta = (double *) malloc(n * sizeof(double));
    tau = (double *) malloc(dk_rows * sizeof(double));

    /* Cache a lookup table to map from deltak column to the set of rows with 
       non-zero entries for that column */
    calc_coefs(n, dk_rows, dk_rowbreaks, dk_cols, coefs, coef_breaks);

    /* Set all beta values to the mean to start */
    ymean = 0;
    for (i = 0; i < n; i++){ if (successes[i] > 0) {ymean += trials[i] / (double)successes[i];} }
    ymean = ymean > 0 ? -gsl_sf_log(ymean / (double)n) : 0;
    for (i = 0; i < n; i++){ beta[i] = ymean; }

    /* Set tau to 1 to start */
    for (i = 0; i < dk_rows; i++){ tau[i] = 1.0; }

    /* Run the Gibbs sampler */
    for (iteration = 0, sample_idx = 0; iteration < iterations; iteration++)
    {
        /* Sample the local laplace penalty tau */
        sample_tau_laplace_gamma(random, beta, dk_rows, dk_rowbreaks, dk_cols, deltak, lambda, 1.0, tau);
        
        /* Sample each of the auxiliary variables (one per row of Dk) */
        sample_prior_aux_laplace_multilambda(random, beta, dk_rows, dk_rowbreaks, dk_cols, deltak, tau, s);
        
        /* Sample from the binomial likelihood */
        sample_likelihood_binomial(random, n, trials, successes, dk_rowbreaks, dk_cols, deltak, s, coefs, coef_breaks, beta);
        
        /* Add the sample */
        if (iteration >= burn && (iteration % thin) == 0){
            lambda_samples[sample_idx] = lambda;
            for(i = 0; i < n; i++){ beta_samples[sample_idx][i] = 1.0 / (1.0 + gsl_sf_exp(-CLAMP(beta[i], BINOMIAL_BETA_MIN, BINOMIAL_BETA_MAX))); }
            sample_idx++;
        }
    }

    free(s);
    free(beta);
    free(tau);
    for (i = 0; i < n; i++){ free(coefs[i]); }
    free(coefs);
    free(coef_breaks);

    gsl_rng_free(random);
}
Example #21
/* Initializes MPI,
 * loads defaults, 
 *       command line arguments,
 *       hdf5 data,
 *       ode model from shared library @code dlopen@
 * allocates kernel, 
 *           ode model parameters
 *           MPI communication buffers
 * calls MCMC routines
 * finalizes and frees (most) structs
 */
int/*always returns success*/
main(int argc,/*count*/ char* argv[])/*array of strings*/ {
  int i=0;
  int warm_up=0; // sets the number of burn-in points from the command line
  char lib_name[BUFSZ];
  ode_model_parameters omp[1];
  omp->size=(problem_size*) malloc(sizeof(problem_size));

  char global_sample_filename_stem[BUFSZ]="Sample.h5"; // filename basis
  char rank_sample_file[BUFSZ]; // filename for sample output
  char resume_filename[BUFSZ]="resume.h5";
  double seed = 1;
  double gamma= 2;
  double t0=-1;
  int sampling_action=SMPL_FRESH;
  
  int start_from_prior=no;
  int sensitivity_approximation=no;

  main_options cnf_options=get_default_options(global_sample_filename_stem, lib_name);
  
  MPI_Init(&argc,&argv);
  int rank,R;
  MPI_Comm_size(MPI_COMM_WORLD,&R);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  char *h5file=NULL;

  gsl_set_error_handler_off();
  
  /* process command line arguments
   */  
  for (i=0;i<argc;i++){
    if (strcmp(argv[i],"-p")==0 || strcmp(argv[i],"--prior-start")==0) {
      start_from_prior=1;
    } else if (strcmp(argv[i],"-d")==0 || strcmp(argv[i],"--hdf5")==0) {
      h5file=argv[i+1];
    } else if (strcmp(argv[i],"-t")==0 || strcmp(argv[i],"--init-at-t")==0) {
      t0=strtod(argv[i+1],NULL);
      //printf("[main] t0=%f\n",t0);
    } else if (strcmp(argv[i],"-w")==0 || strcmp(argv[i],"--warm-up")==0) warm_up=strtol(argv[i+1],NULL,10);
    else if (strcmp(argv[i],"--resume")==0 || strcmp(argv[i],"-r")==0) sampling_action=SMPL_RESUME;
    else if (strcmp(argv[i],"--sens-approx")==0) sensitivity_approximation=1;
    else if (strcmp(argv[i],"-l")==0) strcpy(cnf_options.library_file,argv[i+1]);
    //    else if (strcmp(argv[i],"-n")==0) Tuning=0;
    else if (strcmp(argv[i],"-s")==0) cnf_options.sample_size=strtol(argv[i+1],NULL,0);
    else if (strcmp(argv[i],"-o")==0) strncpy(cnf_options.output_file,argv[i+1],BUFSZ);
    else if (strcmp(argv[i],"-a")==0) cnf_options.target_acceptance=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"-i")==0 || strcmp(argv[i],"--initial-step-size")==0) cnf_options.initial_stepsize=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"-m")==0 || strcmp(argv[i],"--initial-step-size-rank-multiplier")==0) cnf_options.initial_stepsize_rank_factor=strtod(argv[i+1],NULL);

    else if (strcmp(argv[i],"-g")==0) gamma=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"--abs-tol")==0) cnf_options.abs_tol=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"--rel-tol")==0) cnf_options.rel_tol=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"--seed")==0) seed=strtod(argv[i+1],NULL);
    else if (strcmp(argv[i],"-h")==0 || strcmp(argv[i],"--help")==0) {
      print_help();
      MPI_Abort(MPI_COMM_WORLD,0);
    }
  }
  
  seed=seed*137+13*rank;

  /* load Data from hdf5 file
   */
  if (h5file){
    printf("# [main] (rank %i) reading hdf5 file, loading data.\n",rank);
    fflush(stdout);
    read_data(h5file,omp);
    fflush(stdout);
  } else {
    fprintf(stderr,"# [main] (rank %i) no data provided (-d option), exiting.\n",rank);
    MPI_Abort(MPI_COMM_WORLD,-1);
  }
    
  /* load model from shared library
   */
  ode_model *odeModel = ode_model_loadFromFile(lib_name);  /* alloc */
  if (!odeModel) {
    fprintf(stderr, "# [main] (rank %i) Library %s could not be loaded.\n",rank,lib_name);
    exit(1);
  } else printf( "# [main] (rank %i) Library %s loaded.\n",rank, lib_name);
  
  /* construct an output file from rank, library name, and user
   * supplied string.
   */
  char *dot;
  char *lib_base;
  lib_base=basename(lib_name);
  dot=strchr(lib_base,'.');
  dot[0]='\0';
  sprintf(resume_filename,"%s_resume_%02i.h5",lib_base,rank);
  sprintf(rank_sample_file,"mcmc_rank_%02i_of_%i_%s_%s",rank,R,lib_base,basename(cnf_options.output_file));
  cnf_options.output_file=rank_sample_file;
  cnf_options.resume_file=resume_filename;
  
  /* allocate a solver for each experiment for possible parallelization
   */
  ode_solver **solver;
  int c,C=omp->size->C;
  int c_success=0;
  solver=malloc(sizeof(ode_solver*)*C);
  for (c=0;c<C;c++){
    solver[c]=ode_solver_alloc(odeModel);
    if (solver[c]) c_success++;
  }
  if (c_success==C) {
    printf("# [main] Solver[0:%i] for «%s» created.\n",C,lib_base);
  } else {
    fprintf(stderr, "# [main] Solvers for «%s» could not be created.\n",lib_base);
    ode_model_free(odeModel);
    MPI_Abort(MPI_COMM_WORLD,-1);
  }

  /* sensitivity analysis is not feasible for large models. So, it can
   *  be turned off.
   */
  if (sensitivity_approximation){
    //printf("# [main] experimental: Sensitivity approximation activated.\n");
    for (c=0;c<C;c++) ode_solver_disable_sens(solver[c]);
    /* also: make sensitivity function unavailable; that way
     * ode_model_has_sens(model) will return «FALSE»;
     */
    odeModel->vf_sens=NULL;
  }
  
  /* init solver 
   */
  realtype solver_param[3] = {cnf_options.abs_tol, cnf_options.rel_tol, 0};

  const char **x_name=ode_model_get_var_names(odeModel);
  const char **p_name=ode_model_get_param_names(odeModel);
  const char **f_name=ode_model_get_func_names(odeModel);
  
  /* local variables for parameters and initial conditions as presented
     in ode model lib: */
  int N = ode_model_getN(odeModel);
  int P = ode_model_getP(odeModel);
  int F = ode_model_getF(odeModel);

  /* save in ode model parameter struct: */
  set_number_of_state_variables(omp,N);
  set_number_of_model_parameters(omp,P);
  set_number_of_model_outputs(omp,F);

  omp->t0=t0;
  /* ode model parameter struct has pointers for sim results that need
     memory allocation: */
  ode_model_parameters_alloc(omp);
  ode_model_parameters_link(omp);
  fflush(stdout);

  /* get default parameters from the model file
   */
  double p[P];
  gsl_vector_view p_view=gsl_vector_view_array(p,P);
  ode_model_get_default_params(odeModel, p, P);
  if (rank==0)  gsl_printf("default parameters",&(p_view.vector),GSL_IS_DOUBLE | GSL_IS_VECTOR);
  omp->solver=solver;

  /* All MCMC meta-parameters (like stepsize) here are positive (to
   * make sense). Some command line arguments can override parameters
   * read from files; but, input files are processed after the command
   * line parameters. So, to check whether default parameters were
   * altered by the command line, the variable declaration defaults
   * are negative at first. Alterations to some meta-parameter p can
   * be checked by: if (cnf_options.p<0)
   * cnf_options.p=read_from_file(SOME FILE);
   */
  cnf_options.initial_stepsize=fabs(cnf_options.initial_stepsize);
  cnf_options.target_acceptance=fabs(cnf_options.target_acceptance);
  cnf_options.sample_size=fabs(cnf_options.sample_size);

  /* load default initial conditions
   */
  double y[N];
  gsl_vector_view y_view=gsl_vector_view_array(y,N);
  ode_model_get_initial_conditions(odeModel, y, N);
  
  print_experiment_information(rank,R,omp,&(y_view.vector));

  /* initialize the ODE solver with initial time t, default ODE
   * parameters p and default initial conditions of the state y; In
   * addition error tolerances are set and sensitivity initialized.
   */
  //printf("# [main] (rank %i) init ivp: t0=%g\n",rank,omp->t0);
  for (c=0;c<C;c++){
    ode_solver_init(solver[c], omp->t0, omp->E[c]->init_y->data, N, p, P);
    //printf("# [main] solver initialised.\n");    
    ode_solver_setErrTol(solver[c], solver_param[1], &solver_param[0], 1);
    if (ode_model_has_sens(odeModel)) {
      ode_solver_init_sens(solver[c], omp->E[0]->yS0->data, P, N);
    }
  }
  /* An smmala_model is a struct that contains the posterior
   * probability density function and a pointer to its parameters and
   * pre-allocated work-memory.
   */
  smmala_model* model = smmala_model_alloc(LogPosterior, NULL, omp);
  if (model){
    printf("[main] (rank %i) smmala_model allocated.\n",rank);
  }else{
    fprintf(stderr,"[main] (rank %i) smmala_model could not be allocated.\n",rank);
    MPI_Abort(MPI_COMM_WORLD,-1);
  }
  
  /* initial parameter values; after allocating an mcmc_kernel of the
   * right dimensions we set the initial Markov chain state from
   * either the model's default parametrization p, the prior's μ, or the
   * state of a previously completed mcmc run (resume).
   */
  int D=omp->size->D;
  double init_x[D];
  double beta=assign_beta(rank,R,round(gamma));
  double tgac=cnf_options.target_acceptance;
  double m=cnf_options.initial_stepsize_rank_factor;
  double step=cnf_options.initial_stepsize;
  if (m>1.0 && rank>0) step*=gsl_pow_int(m,rank);
  pdf_normalisation_constant(omp);
  printf("[main] (rank %i) likelihood log(normalisation constant): %g\n",rank,omp->pdf_lognorm);
  mcmc_kernel* kernel = smmala_kernel_alloc(beta,D,step,model,seed,tgac);
  
  int resume_load_status;
  if (sampling_action==SMPL_RESUME){
    resume_load_status=load_resume_state(resume_filename, rank, R, kernel);
    assert(resume_load_status==EXIT_SUCCESS);
    for (i=0;i<D;i++) init_x[i]=kernel->x[i];
  } else if (start_from_prior){     
    if (rank==0) printf("# [main] setting initial mcmc vector to prior mean.\n");
    for (i=0;i<D;i++) init_x[i]=gsl_vector_get(omp->prior->mu,i);
  } else {
    if (rank==0) printf("# [main] setting mcmc initial value to log(default parameters)\n");
    for (i=0;i<D;i++) init_x[i]=gsl_sf_log(p[i]);
  }
  fflush(stdout);
  //display_prior_information(omp->prior);
  
  /* here we initialize the mcmc_kernel; this makes one test
   * evaluation of the log-posterior density function. 
   */
  
  /*
  if (rank==0){
    printf("# [main] initializing MCMC.\n");
    printf("# [main] init_x:");
    for (i=0;i<D;i++) printf(" %g ",init_x[i]);
    printf("\n");
  }
  */
  
  mcmc_init(kernel, init_x);
  /* display the results of that test evaluation
   *
   */
  if (rank==0){
    printf("# [main] rank %i init complete .\n",rank);
    display_test_evaluation_results(kernel);
    ode_solver_print_stats(solver[0], stdout);
    fflush(stdout);
    fflush(stderr);  
  }
  
  size_t SampleSize = cnf_options.sample_size;  
  
  /* in parallel tempering the chains can swap their positions;
   * this buffers the communication between chains.
   */
  void *buffer=(void *) smmala_comm_buffer_alloc(D);
  
  /* Initialization of burn-in length
   */
  size_t BurnInSampleSize;
  if (warm_up==0){
    BurnInSampleSize = 7 * (int) sqrt(cnf_options.sample_size);
  } else {
    BurnInSampleSize=warm_up;
  }
  if (rank==0){
    printf("# Performing Burn-In with step-size (%g) tuning: %lu iterations\n",get_step_size(kernel),BurnInSampleSize);
    fflush(stdout);
  }
  /* Burn In: these iterations are not recorded, but are used to find
   * an acceptable step size for each temperature regime.
   */
  int mcmc_error;
  mcmc_error=burn_in_foreach(rank,R, BurnInSampleSize, omp, kernel, buffer);
  assert(mcmc_error==EXIT_SUCCESS);
  if (rank==0){
    fprintf(stdout, "\n# Burn-in complete, sampling from the posterior.\n");
  }
  /* this struct contains all necessary id's and size arrays
   * for writing sample data to an hdf5 file in chunks
   */
  hdf5block_t *h5block = h5block_init(cnf_options.output_file,
				      omp,SampleSize,
				      x_name,p_name,f_name);
  
  /* The main loop of MCMC sampling
   * these iterations are recorded and saved to an hdf5 file
   * the file is set up and identified via the h5block variable.
   */  
  mcmc_error=mcmc_foreach(rank, R, SampleSize, omp, kernel, h5block, buffer, &cnf_options);
  assert(mcmc_error==EXIT_SUCCESS);
  append_meta_properties(h5block,&seed,&BurnInSampleSize, h5file, lib_base);
  h5block_close(h5block);

  /* clear memory */
  smmala_model_free(model);
  mcmc_free(kernel);
  ode_model_parameters_free(omp);
  MPI_Finalize();
  return EXIT_SUCCESS;
}
Example #22
int fillpara(struct_para *D_para, struct_data *D)
{
int c,l,m,mm;
	/*initials*/
	/*K*/
	for (c=0;c<2;c++){
		for (l=0;l<D->L;l++){
			for (m=0;m<D->NoORF[c*D->L+l];m++){
				mm=D->NoSUM[c*D->L+l]+m;
				D_para->K_clm[mm]=D->y[c*D->SHIFTlmn+l*D->M*D->N + m*D->N + D->NoTIME[mm]-1];
				if(D_para->K_clm[mm]>0){D_para->K_clm[mm]=gsl_sf_log(D_para->K_clm[mm]);}
			}
		}
	}

	for (l=0;l<(2*D->L);l++)          {D_para->tau_K_cl[l]=1/(0.4*0.4);}                  /*Precision*/

	for (l=0;l<D->L;l++)          {D_para->K_o_l[l]=gsl_sf_log(0.25);}        /*LMean*/
	D_para->sigma_K_o=1/(0.6*0.6);               /*Precision*/
	D_para->K_p=gsl_sf_log(0.1);       /*LMean*/

	/*r*/
	for (c=0;c<2;c++){
		for (l=0;l<D->L;l++){
			for (m=0;m<D->NoORF[c*D->L+l];m++){
				mm=D->NoSUM[c*D->L+l]+m;
				D_para->r_clm[mm]=gsl_sf_log(2.5);
			}
		}
	}                          /*LMean*/

	for (l=0;l<2*D->L;l++)          {D_para->tau_r_cl[l]=15;}                  /*Precision*/

	for (l=0;l<D->L;l++)          {D_para->r_o_l[l]=gsl_sf_log(2.5);}        /*LMean*/
	D_para->sigma_r_o=16;               /*Precision*/

	D_para->r_p=gsl_sf_log(2.5);       /*LMean*/

	/*nu*/
	for (l=0;l<D->L;l++)          {D_para->nu_l[l]=18;}                      /*LMean*/
	D_para->sigma_nu=0.0025;   /*Precision for lMean*/

	D_para->nu_p=18;   /*LMean*/
	/*P*/
	D_para->P=gsl_sf_log(0.0001);      /*LMean*/
	
	for (l=0;l<D->L;l++)          {D_para->gamma[l]=0;} 

	for (l=0;l<D->L;l++)          {D_para->omega[l]=0;}
	for (l=0;l<D->L;l++)          {D_para->delta[l]=1;}/*!*/  
 
	D_para->alpha[0]=gsl_sf_log(1);
	D_para->beta[0]=gsl_sf_log(1);
	D_para->alpha[1]=gsl_sf_log(1);
	D_para->beta[1]=gsl_sf_log(1);  
	D_para->sigma_gamma=1;
	D_para->sigma_omega=1;
	D_para->upsilon_c[0]=1; 
	D_para->upsilon_c[1]=1;       D_para->sigma_upsilon=1;
	D_para->upsilon_p=1;


return 0;
}
Example #23
double I0(double y0, double y)
{
	return gsl_sf_log((exp(y0+y)-1.0)/(exp(y0-y)-1.0))-y;
}
Example #24
//--------------------------------------------------------------------------------------------------
double multilog_kernel( const double& h, const double& R ) {
  return gsl_sf_log( h * h + R * R );
}
Example #25
/** ****************************************************************************************************
 ***** calc an individual logistic regression model 
 *******************************************************************************************************/
void calc_node_Score_binary_rv_R(network *dag, datamatrix *obsdata, int nodeid,  int errverbose,
                                datamatrix *designmatrix, const double priormean, const double priorsd, const double priorgamshape, const double priorgamscale,
                                const int maxiters, const double epsabs,int storeModes, double epsabs_inner, int maxiters_inner, double finitestepsize, int verbose,
				 double h_guess, double h_epsabs, int maxiters_hessian, int ModesONLY,
				 double max_hessian_error,double myfactor_brent, int maxiters_hessian_brent, double num_intervals_brent)
{
#ifdef NOPRIOR
Rprintf("############ Warning - Priors turned off - use only for checking mlik value! ################\n");
#endif
  
  int i,status=GSL_SUCCESS,sss,index=0,iter;
  /*int j;*/
  gsl_vector *myBeta,*vectmp1,*vectmp2,*vectmp1long,*vectmp2long,*localbeta,*localbeta2,/* *dgvalues,*/ *finitefactors,/* *factorindexes,*/ *finitestepsize_vec=0,*nmstepsize=0;/** this will hold the parameter point estimates + 1 is for precision of rv's */
  struct fnparams gparams;/** for passing to the gsl zero finding functions */
  double gvalue;double nm_size=0.0;
  gsl_matrix *mattmp2,*mattmp3,*mattmp4,*hessgvalues,*hessgvalues3pt;
  double mydet=0.0,logscore=0.0;/*,logscore3pt=0.0;*/
  gsl_permutation *initsperm;
  gsl_permutation *perm=0; 
  const gsl_multimin_fminimizer_type *T;
       gsl_multimin_fminimizer *s;
     gsl_multimin_function F; 
 
  double lower,upper,lower_f,upper_f;int found=0; double delta=0.0,new_f_min=0.0, finitestepsize_nm=0.0, increLogscale=0.0, best_Error=0.0,best_h=0.0;
 
  const gsl_min_fminimizer_type *T1;
  gsl_min_fminimizer *s1; 
  int n,m;
 /* double min_error,cur_error,accurate_logscore=0,accurate_logscore3pt=0,bestsize=0,lowerend,upperend,h_guess,h_epsabs;*/
  /*const gsl_min_fminimizer_type *T;
  gsl_min_fminimizer *s;
  gsl_function F;*/ 
  /*double h_lowerbound[1],h_upperbound[1],h_guess_array[1];
  int h_nbd[1];*/
  int nDim;/** dimension of optim problem */
  int *nbd;/** nbd is an integer array of dimension nDim.
	                                      On entry nbd represents the type of bounds imposed on the variables, and must be specified as follows:
	                                      nbd(i)=0 if x(i) is unbounded,
		                              1 if x(i) has only a lower bound,
		                              2 if x(i) has both lower and upper bounds, and
		                              3 if x(i) has only an upper bound.
	                                      On exit nbd is unchanged.*/
  
  double *lowerbounds,*upperbounds; /* h_gvalue;*//*,lowestHesserror,beststepsize;*/
  int failcode;/** check code see R ?optim - if non-zero then a problem **/
  double factr=1e-07;/** error size scaler - this is the default value*/
  double pgtol=1e-07;/** default value is zero - this is the gradient tolerance - mmm what does that actually mean? */
  int fncount,grcount;/** hold number of evaluations */
  char msg[60];/** error message */
  int trace=errverbose;/** like verbose */
  int nREPORT=1000;/** report freq*/
  int lmm=5;/** see R ?optim - number of function evals to store - default is 5 */
  /** want to find the modes of the function g(betas) where betas=b_0,b_1,,...,tau, the latter being precision */
  /** g(betas) is not differentiable as it contains integrals (which themselves need Laplace estimates **/
  
    
  /** SETUP things which are the same across all data chunks - groups  */
  /** build design matrix which is designmatrix->datamatrix=X, designmatrix->Y=Y plus priors designmatrix->priorsd, designmatrix->priormean **/
  /** NOTE: design matrix here does include the random effect term **/
  /** note - numparams does NOT include precision term - numpars +1 */
  build_designmatrix_rv(dag,obsdata,priormean, priorsd,priorgamshape,priorgamscale,designmatrix,nodeid,storeModes);
  
  nDim=designmatrix->numparams+1; 
  lowerbounds=(double *)R_alloc(nDim,sizeof(double*));
  upperbounds=(double *)R_alloc(nDim,sizeof(double*));
  nbd=(int *)R_alloc(nDim,sizeof(int*));
  for(i=0;i<nDim-1;i++){lowerbounds[i]=-DBL_MAX;
                        upperbounds[i]=DBL_MAX;
			nbd[i]=0;}
			nbd[nDim-1]=1;lowerbounds[nDim-1]=0.001;/** lower bound for precision */
  finitefactors = gsl_vector_alloc(7);/** used to change stepsize in hessian estimate **/			
  gsl_vector_set(finitefactors,0,1.0E-03);gsl_vector_set(finitefactors,1,1.0E-02);gsl_vector_set(finitefactors,2,1.0E-01);
  gsl_vector_set(finitefactors,3,1.0);gsl_vector_set(finitefactors,4,1.0E+01);gsl_vector_set(finitefactors,5,1.0E+02);
  gsl_vector_set(finitefactors,6,1.0E+03);
  
  /*factorindexes = gsl_vector_alloc(7);*//** used to change stepsize in hessian estimate **/			
  /*for(i=0;i<7;i++){gsl_vector_set(factorindexes,i,i);}*/
  
  /** change finite.step.size by 0.1,1, and 10 factors respectively **/
  
  /*dgvalues = gsl_vector_alloc (designmatrix->numparams+1);*//** inc rv precision */
  
  myBeta = gsl_vector_alloc (designmatrix->numparams+1);/** inc rv precision */
  vectmp1 = gsl_vector_alloc (designmatrix->numparams);/** scratch space **/
  vectmp2 = gsl_vector_alloc (designmatrix->numparams);/** scratch space **/
  mattmp2 = gsl_matrix_alloc (obsdata->numDataPts,designmatrix->numparams);
  mattmp3 = gsl_matrix_alloc (designmatrix->numparams,designmatrix->numparams);
  mattmp4 = gsl_matrix_alloc (designmatrix->numparams,designmatrix->numparams);
  initsperm = gsl_permutation_alloc (designmatrix->numparams);/** for use with initial guesses */
  vectmp1long = gsl_vector_alloc (obsdata->numDataPts);/** scratch space **/
  vectmp2long = gsl_vector_alloc (obsdata->numDataPts);
  localbeta = gsl_vector_alloc (designmatrix->numparams);/** scratch space in later functions - excl. precision **/
  localbeta2 = gsl_vector_alloc (designmatrix->numparams+1);/** scratch space in later functions - inc. precision **/
  
  hessgvalues = gsl_matrix_alloc (designmatrix->numparams+1,designmatrix->numparams+1);
  hessgvalues3pt = gsl_matrix_alloc (designmatrix->numparams+1,designmatrix->numparams+1);
  
  gparams.designdata=designmatrix;
  
   gparams.vectmp1=vectmp1;
   gparams.vectmp2=vectmp2;
   gparams.mattmp2=mattmp2;
   gparams.mattmp3=mattmp3;
   gparams.mattmp4=mattmp4;
   gparams.perm=initsperm;
   gparams.vectmp1long=vectmp1long;
   gparams.vectmp2long=vectmp2long;
   gparams.beta=localbeta;
   gparams.betaincTau=localbeta2;
   gparams.epsabs_inner=epsabs_inner;
   gparams.maxiters_inner=maxiters_inner;
   gparams.verbose=verbose;
   gparams.finitestepsize=finitestepsize;
   
   dag->nodeScoresErrCode[nodeid]=0;/** reset error code to no error **/
   
   /*status=GSL_SUCCESS;*/
   generate_rv_inits(myBeta,&gparams);
   /*Rprintf("starting optimisation\n");*/
   /** run a loop over different stepsize - starting with the smallest first as this is more likely successful **/
   for(i=0;i<finitefactors->size;i++){/** go forwards through the factors so start with SMALLEST STEPSIZE */
   /*Rprintf("step size iteration %d\n",i);*/
     failcode=0;/** reset*/
    gparams.finitestepsize=gsl_vector_get(finitefactors,i)*finitestepsize;
   
     lbfgsb(nDim, lmm, myBeta->data, lowerbounds, upperbounds, nbd, &gvalue, &g_outer_R,
                      &rv_dg_outer_R, &failcode, 
	              &gparams,
	              factr,
                      pgtol, &fncount, &grcount,
                      maxiters, msg, trace, nREPORT);
		      
    if(!failcode){dag->nodeScoresErrCode[nodeid]=0;/*bestsize=gparams.finitestepsize;*/break;}/** break out of for loop if no error as we are done **/	     
   
   } /** end of for loop so now have mode estimates */
     
   if(failcode){Rprintf("%s at node %d\n",msg,nodeid+1);/** notify if there is an error and set final error code **/
		     dag->nodeScoresErrCode[nodeid]=1;
   } 
     
    gparams.finitestepsize=finitestepsize;/** reset */
    if(storeModes){/** keep a copy of the parameter modes found for use later in other function calls etc**/
	 index=0;    /*Rprintf("size of beta=%d %f %f\n",myBeta->size, gsl_vector_get(myBeta,0),gsl_vector_get(myBeta,1));*/
		     for(i=0;i<dag->numNodes+3;i++){/** roll myBeta into dag->modes into the appropriate columns**/
		       if(gsl_matrix_get(dag->modes,nodeid,i)!=DBL_MAX){
			 gsl_matrix_set(dag->modes,nodeid,i,gsl_vector_get(myBeta,index++));}} 
                   /*for(i=0;i<dag->numNodes+3;i++){Rprintf("%e ",gsl_matrix_get(dag->modes,nodeid,i));}Rprintf("\n");*/
		   
		   }     
   
   if(!ModesONLY){/** only want modes so can skip the rest **/
     
   /** now compute the hessian at the step size with lowest error **/
   /*Rprintf("starting hessian estimation\n");*/
   n=obsdata->numDataPts;
   m=designmatrix->numparams+1;/** inc precision */
   perm = gsl_permutation_alloc (m);
 
   /** just re-use as much of existing gparams as possible - so names are meaningless e.g. betafixed is actually gvalue */
   gparams.betaincTau=myBeta;
   gparams.nDim=n;
   gparams.mDim=m;
   gparams.perm=perm;
   gparams.mattmp2=hessgvalues;
   gparams.mattmp3=hessgvalues3pt;
   gparams.betafixed=gvalue;
   
   
    F.f = &compute_mlik_nm;
    F.params = &gparams;
    F.n = 1;
   
    T = gsl_multimin_fminimizer_nmsimplex2;
    s = gsl_multimin_fminimizer_alloc (T, 1);
   
    finitestepsize_vec = gsl_vector_alloc (1);
    gsl_vector_set (finitestepsize_vec, 0, h_guess);
    nmstepsize = gsl_vector_alloc (1);
    gsl_vector_set_all (nmstepsize, h_guess); 
    gsl_multimin_fminimizer_set (s, &F, finitestepsize_vec,nmstepsize);
    status = GSL_SUCCESS;
    
     iter=0;
   
    do
         {
           iter++;/*Rprintf("iter=%d\n",iter);*/
           status = gsl_multimin_fminimizer_iterate (s);
     
           if (status) 
             break;
	   
	   nm_size = gsl_multimin_fminimizer_size (s);
           status = gsl_multimin_test_size (nm_size, h_epsabs);
     /*
           if (status == GSL_SUCCESS)
             {
               Rprintf ("converged to minimum at\n");
             }
     */
           /*Rprintf ("iter=%5d error in mlik=%3.5e using fin.diff step= %3.2e nmsize=%3.2e\n", iter,s->fval,gsl_vector_get (s->x, 0),nm_size);*/
    
         }
       while (status == GSL_CONTINUE && iter < maxiters_hessian);
       if( (status != GSL_SUCCESS)){/*actual_status=status;*//** copy for use later **/
                                    status=GSL_FAILURE;} /** solution failed to achieve a value below h_epsabs **/                                                               
	 
    finitestepsize=gsl_vector_get(s->x,0);/** get best fin.diff stepsize **/
    finitestepsize_nm=finitestepsize;/** save nelder mead estimate */
    dag->hessianError[nodeid]= s->fval;/** get fin.diff error **/
    
    gsl_multimin_fminimizer_free (s);
   
   /** README - it might be possible to avoid the brent by increasing the epsabs error in nelder mead (and no. of iterations), although for hard cases
       this probably will not work but may give a little greater accuracy for easier cases, These are the hessian.params arg in R */
    
   if(dag->hessianError[nodeid]!=DBL_MAX && dag->hessianError[nodeid]>max_hessian_error){Rprintf("Error in mlik = %e > tolerance of %e so continuing optimisation using Brent initial guess h=%e\n",
                                                   dag->hessianError[nodeid],max_hessian_error,finitestepsize); 
   
     /* Rprintf("stepsize after NM= %e\n",finitestepsize);*/
  
     T1 = gsl_min_fminimizer_brent;
     s1 = gsl_min_fminimizer_alloc (T1);
	 
      /** must find lower and upper such that f(lower)<f(finitestepsize)<f(upper) **/ 
      /** use an interval of lower=finitestepsize/FACTOR, upper=finitestepsize*FACTOR and then start at the lower end and travel up until
      find suitable endpoints - seems to work but not exactly fast!**/
      best_Error=dag->hessianError[nodeid];/** original error from nelder */
      best_h=finitestepsize;               /** original stepsize from nelder */
      found=0;/** flag for found good result */
      lower=finitestepsize/myfactor_brent;
      upper=myfactor_brent*finitestepsize;
      lower_f=compute_mlik_brent(lower, &gparams);/** value at lower point **/
      upper_f=compute_mlik_brent(upper, &gparams);/** value at higher point **/
      increLogscale=(gsl_sf_log(upper)-gsl_sf_log(lower))/num_intervals_brent;/** on a log scale */
      for(delta=gsl_sf_log(lower)+increLogscale;delta<gsl_sf_log(upper);delta+=increLogscale){/** linear increments on a log scale **/
	R_CheckUserInterrupt();/** allow an interrupt from R console */ 
	/** find a point which has f(x) lower than f(lower) and f(upper) **/
	 new_f_min=compute_mlik_brent(gsl_sf_exp(delta), &gparams); 
	/* Rprintf("lower=%e, delta=%e, upper=%e\n",lower,gsl_sf_exp(delta),upper);*/
        if(lower_f>new_f_min && new_f_min<upper_f  && get_best_stepsize(gsl_sf_exp(delta),lower,upper,maxiters_hessian_brent,&gparams, &compute_mlik_brent,
	                                                               s1,&finitestepsize,&(dag->hessianError[nodeid]) )<=max_hessian_error){/** have an interval suitable for bracketing **/
	                                                           /** the address is passed above so the error can be stored without rerunning the function */
	  /*finitestepsize=delta;*/
	  found=1;
	  status=GSL_SUCCESS;
	  break;/** break out of delta - so have found new x_min **/
	} else {/** have not got a good enough error but save the best error and stepsize so far found **/
	        if(dag->hessianError[nodeid]<best_Error){best_Error=dag->hessianError[nodeid];
	                                                best_h=finitestepsize;
		                                        }
	        }
      } /** end of search for interval and good error **/
         
      if(!found){/** have not found a suitably small error but may have found a better error than nelder mead **/
        
       /** best_Error will either be the original nelder mean value or better, and best_h is the corresponding stepsize**/
	                                 dag->hessianError[nodeid]=best_Error;
					 finitestepsize=best_h;
        /** reset back to nelder-mead estimate **/
	status=GSL_FAILURE;/** set to failure since we did not achieve the lower error asked for */
      Rprintf("failed to meet tolerance of %e and using best error estimate found of %e\n",max_hessian_error,dag->hessianError[nodeid]);}

    gsl_min_fminimizer_free (s1);
   
   } /** end of error being too large **/
 
   if(dag->hessianError[nodeid]==DBL_MAX){/** in this case nelder mead could not estimate the hessian error so abort as something is probably
                                               very wrong here */
                                          error("");}/** use the R tryCatch rather than the switch for status below **/
                                          

       switch(status){  /** choose which type of node we have */
                     case GSL_SUCCESS:{    
		                     /** successful finite diff so now do final computation with the optimal step size **/
                                     /*Rprintf("search for optimal step size : status = %s at nodeid %d\n", gsl_strerror (status),nodeid+1);*/
                                     rv_hessg_outer(myBeta,&gparams, hessgvalues,finitestepsize,hessgvalues3pt);/** EDIT BACK to "finitestepsize" start with LARGEST STEPSIZE **/
				    /* Rprintf("HESSIAN using stepsize=%e\n",finitestepsize);
				     for(i1=0;i1<hessgvalues->size1;i1++){
				        for(i2=0;i2<hessgvalues->size2;i2++){Rprintf("%e ",gsl_matrix_get(hessgvalues,i1,i2));}Rprintf("\n");}   */
                                     status=gsl_linalg_LU_decomp(hessgvalues,perm,&sss);
                                     mydet=gsl_linalg_LU_lndet(hessgvalues);/** compute determinant but this might be a nan - overflow? gsl_linalg_LU_lndet*/
                                     logscore= -n*gvalue-0.5*mydet+(m/2.0)*log((2.0*M_PI)/n);/** this is the final value */
				     if(gsl_isnan(logscore)){logscore= R_NaN;dag->nodeScoresErrCode[nodeid]=2;}
				     dag->nodeScores[nodeid]=logscore;
				       
		                      break;  
		     }
       
		     case GSL_FAILURE: {/** the minimiser did not find a minimum meeting the accuracy requirements and so may be unreliable **/
		                       Rprintf ("-- ERROR! -- search for optimal step size error: status = %s at nodeid %d\n", gsl_strerror (status),nodeid+1);
                                       rv_hessg_outer(myBeta,&gparams, hessgvalues,finitestepsize,hessgvalues3pt);/** start with LARGEST STEPSIZE **/
                                       /*Rprintf("HESSIAN using stepsize=%e\n",finitestepsize);
				       for(i1=0;i1<hessgvalues->size1;i1++){
				        for(i2=0;i2<hessgvalues->size2;i2++){Rprintf("%e ",gsl_matrix_get(hessgvalues,i1,i2));}Rprintf("\n");} */
				        
				       status=gsl_linalg_LU_decomp(hessgvalues,perm,&sss);
                                       mydet=gsl_linalg_LU_lndet(hessgvalues);/** compute determinant but this might be a nan - overflow? gsl_linalg_LU_lndet*/
                                       logscore= -n*gvalue-0.5*mydet+(m/2.0)*log((2.0*M_PI)/n);/** this is the final value */
				       dag->nodeScoresErrCode[nodeid]=4;
				       if(gsl_isnan(logscore)){logscore= R_NaN;dag->nodeScoresErrCode[nodeid]=2;}
				       dag->nodeScores[nodeid]=logscore;
				       
		                       
				       break; 
		     }
		     
		     default:{Rprintf("got case %s\n",gsl_strerror (status)); error("in default switch in calc_node_Score_binary_rv_R() - should never get here!");}  
		     
          }
          
        
   /** try the bounded search for h stepsize rather than one-dim min which needs bound specified **/     
   } /** end of ModesONLY **/     
  
   /** now free up allocated memory **/
   for(i=0;i<designmatrix->numUnqGrps;i++){gsl_matrix_free(designmatrix->array_of_designs[i]);
                                           gsl_vector_free(designmatrix->array_of_Y[i]);}
   gsl_vector_free(designmatrix->priormean);
   gsl_vector_free(designmatrix->priorsd);
   gsl_vector_free(designmatrix->priorgamshape);
   gsl_vector_free(designmatrix->priorgamscale);
   gsl_vector_free(designmatrix->Y);
   gsl_matrix_free(designmatrix->datamatrix_noRV);
   /*gsl_vector_free(dgvalues);*/
   gsl_vector_free(myBeta); 
   gsl_vector_free(vectmp1);
   gsl_vector_free(vectmp2);
   gsl_matrix_free(mattmp2);
   gsl_matrix_free(mattmp3);
   gsl_matrix_free(mattmp4);
   gsl_permutation_free(initsperm);
   gsl_vector_free(vectmp1long);
   gsl_vector_free(vectmp2long);
   gsl_vector_free(localbeta);
   gsl_vector_free(localbeta2);
   gsl_matrix_free(hessgvalues);
   gsl_matrix_free(hessgvalues3pt);
   gsl_vector_free(finitefactors);
   /*gsl_vector_free(factorindexes);*/
   
   if(!ModesONLY){/** didn't allocate these so don't unallocate! */
    gsl_permutation_free(perm);
    gsl_vector_free(finitestepsize_vec);
    gsl_vector_free(nmstepsize);}
   
   /*if(!failcode){*//*}*/
   
   /*dag->nodeScores[nodeid]=logscore;*/

}
// GillespieSolver::step() advances the system by one reaction and returns dt.
double GillespieSolver::step(void)
{
	if (this->m.reactions.size() == 0 || this->w.current_state.size() == 0) {
		// reactions or world status not initialized.
		return 0.0;
	}

	std::vector<double>	a( this->m.reactions.size() );

	for(unsigned int idx(0); idx < this->m.reactions.size(); idx++) {
		a[idx] = this->m.reactions[idx].k;	// implement and fix accessor 
		for(
			std::vector<id_stoichiometry>::iterator it_reactant(this->m.reactions[idx].reactants.begin());
			it_reactant != this->m.reactions[idx].reactants.end();
			it_reactant++
		) 
		{
			a[idx] *= combination(
						this->w.current_state[ it_reactant->first ],
						it_reactant->second	);
		}
	}

	double a_total = std::accumulate(a.begin(), a.end(), double(0.0) );

	if (a_total == 0.0) {
		// No reaction can happen.
		return 0.0;
	}

	double rnd_num1 = gsl_rng_uniform(this->random_handle);
	double dt = gsl_sf_log(1.0 / rnd_num1) / double(a_total);
	double rnd_num2 = gsl_rng_uniform(this->random_handle) * a_total;

	int u(-1);
	double acc(0.0);
	int len = a.size();
	do {
		u++;
		acc += a[u];
	} while ( acc < rnd_num2 && u < len - 1);

	this->w.current_t += dt;
	//	Reaction Ru (this->m.reactions[u]) occurs.
	for(
		std::vector<id_stoichiometry>::iterator it(this->m.reactions[u].reactants.begin());
		it != this->m.reactions[u].reactants.end();
		it++
	   )
	{
		this->w.current_state[it->first] -= it->second;	// second is the stoichiometry
	}

	for(
		std::vector<id_stoichiometry>::iterator it(this->m.reactions[u].products.begin());
		it != this->m.reactions[u].products.end();
		it++
	   )
	{
		this->w.current_state[it->first] += it->second;
	}

	return dt;
}
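/* Note: the propensity loop in step() multiplies each rate constant by
   combination(n, k); that helper is not part of this listing.  A minimal
   sketch, assuming the usual Gillespie convention that it counts the distinct
   ways to choose k reactant molecules from a population of n,
   i.e. n(n-1)...(n-k+1)/k! (an assumption, not the original implementation): */
static double combination(int n, int k)
{
	/* infeasible reaction: not enough molecules, so the propensity vanishes */
	if (n < k)
		return 0.0;
	double c = 1.0;
	for (int i = 0; i < k; i++)
		c *= (double)(n - i) / (double)(i + 1);	/* accumulates C(n, k) */
	return c;
}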
Exemple #27
0
//--------------------------------------------------------------------------------------------------
double  thin_plate_kernel( const double& h, const double& R ) {
  return ( h * h + R * R ) * gsl_sf_log( h * h + R * R );
}
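/* A minimal usage sketch (not from the original source): the kernel above is
   r^2 * log(r^2) with r^2 = h^2 + R^2, and gsl_sf_log is not defined at zero,
   so a guarded wrapper along these lines may be wanted when h = R = 0.  The
   name thin_plate_kernel_safe is hypothetical. */
#include <gsl/gsl_sf_log.h>

static double thin_plate_kernel_safe( double h, double R ) {
  const double r2 = h * h + R * R;
  /* r^2 * log(r^2) -> 0 as r -> 0, so return that limit instead of calling gsl_sf_log(0) */
  return ( r2 > 0.0 ) ? r2 * gsl_sf_log( r2 ) : 0.0;
}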
Exemple #28
0
/** ****************************************************************************************************
 ***** calc an individual Poisson regression model 
 *******************************************************************************************************/
void calc_poisson_marginal_rv_R(network *dag, datamatrix *obsdata, int nodeid,  int errverbose,
                                datamatrix *designmatrix, const double priormean, const double priorsd, const double priorgamshape, const double priorgamscale,
                                const int maxiters, const double epsabs, double epsabs_inner, int maxiters_inner, double finitestepsize, int verbose,
				double h_guess, double h_epsabs, int maxiters_hessian,
			       double *denom_modes, int paramid, double betafixed, double mlik, double *posterior,
				double max_hessian_error,double myfactor_brent, int maxiters_hessian_brent, double num_intervals_brent){

 int i,j,status,sss,haveprecision,iter=0;
  gsl_vector *myBeta,*vectmp1,*vectmp2,*vectmp1long,*vectmp2long,*localbeta,*localbeta2,/* *dgvalues,*/ *betafull,*finitefactors,*finitestepsize_vec,*nmstepsize;/** this will hold the parameter point estimates + 1 is for precision of rv's */
  struct fnparams gparams;/** for passing to the gsl zero finding functions */
  double gvalue;
  gsl_matrix *mattmp2,*mattmp3,*mattmp4,*hessgvalues,*hessgvaluesfull,*hessgvalues3pt,*hessgvaluesfull3pt;
  double mydet=0.0,logscore=0.0;
  gsl_permutation *initsperm;
  gsl_permutation *perm=0;
  int n,m;
  double val=0.0;double nm_size=0.0;
  const gsl_multimin_fminimizer_type *T;
       gsl_multimin_fminimizer *s;
     gsl_multimin_function F;
     double lower,upper,lower_f,upper_f;int found=0; double delta=0.0,new_f_min=0.0;
   double increLogscale=0.0, best_Error=0.0,best_h=0.0, hessian_Error=0.0;
  const gsl_min_fminimizer_type *T1;
  gsl_min_fminimizer *s1;  
 /*const gsl_min_fminimizer_type *T;
  gsl_min_fminimizer *s;
  gsl_function F;*/
  int nDim;/** dimension of optim problem */
  int *nbd;/** nbd is an integer array of dimension nDim.
	                                      On entry nbd represents the type of bounds imposed on the variables, and must be specified as follows:
	                                      nbd(i)=0 if x(i) is unbounded,
		                              1 if x(i) has only a lower bound,
		                              2 if x(i) has both lower and upper bounds, and
		                              3 if x(i) has only an upper bound.
	                                      On exit nbd is unchanged.*/
  
  double *lowerbounds,*upperbounds;
  int failcode;/** check code, see R ?optim - if non-zero then there was a problem **/
  double factr=1e-07;/** error size scaling factor - this is the default value*/
  double pgtol=1e-07;/** again default value */
  int fncount,grcount;/** hold number of evaluations */
  char msg[60];/** error message */
  int trace=0;/** like verbose */
  int nREPORT=1000;/** report freq*/
  int lmm=5;/** see R ?optim - number of function evals to store - default */
  /** want to find the modes of the function g(betas) where betas=b_0,b_1,,...,tau, the latter being precision */
  /** g(betas) is not differentiable as it contains integrals (which themselves need Laplace estimates **/
  
  /** SETUP things which are the same across all data chunks - groups  */
  /** build design matrix which is designmatrix->datamatrix=X, designmatrix->Y=Y plus priors designmatrix->priorsd, designmatrix->priormean **/
  /** NOTE: design matrix here does include the random effect term **/
  /** note - numparams does NOT include precision term - numpars +1 */
  

  build_designmatrix_pois_rv(dag,obsdata,priormean, priorsd,priorgamshape,priorgamscale,designmatrix,nodeid,0);
  
  nDim=designmatrix->numparams+1-1;/** +1 for prec -1 for marginal */ 
  lowerbounds=(double *)R_alloc(nDim,sizeof(double));
  upperbounds=(double *)R_alloc(nDim,sizeof(double));
  nbd=(int *)R_alloc(nDim,sizeof(int));
  for(i=0;i<nDim;i++){lowerbounds[i]=-DBL_MAX;
                        upperbounds[i]=DBL_MAX;
			nbd[i]=0;}
  /** unbounded - by default */
  
  if(paramid==(designmatrix->numparams+1)-1){haveprecision=1;} else {haveprecision=0;}
  
  if(!haveprecision){/** we are NOT marginalising over the precision parameter and so need a constrained optimiser where the LAST term is the precision term,
                         so we set a bound for this */
    nbd[nDim-1]=1;/** enforce a lower bound */
    lowerbounds[nDim-1]=0.001;/** a hard lower bound - setting it to zero would cause a problem */
  }
   
  finitefactors = gsl_vector_alloc(7);/** used to change stepsize in hessian estimate **/			
  gsl_vector_set(finitefactors,0,1.0E-03);gsl_vector_set(finitefactors,1,1.0E-02);gsl_vector_set(finitefactors,2,1.0E-01);
  gsl_vector_set(finitefactors,3,1.0);gsl_vector_set(finitefactors,4,1.0E+01);gsl_vector_set(finitefactors,5,1.0E+02);
  gsl_vector_set(finitefactors,6,1.0E+03);
  
  /*Rprintf("nDim=%d paramID=%d\n",nDim,paramid);
  for(i=0;i<nDim;i++){Rprintf("lower=%d ",lowerbounds[i]);}Rprintf("\n");*/
  vectmp1 = gsl_vector_alloc (designmatrix->numparams);/** scratch space **/
  vectmp2 = gsl_vector_alloc (designmatrix->numparams);/** scratch space **/
  mattmp2 = gsl_matrix_alloc (obsdata->numDataPts,designmatrix->numparams);
  mattmp3 = gsl_matrix_alloc (designmatrix->numparams,designmatrix->numparams);
  mattmp4 = gsl_matrix_alloc (designmatrix->numparams,designmatrix->numparams);
  initsperm = gsl_permutation_alloc (designmatrix->numparams);/** for use with initial guesses */
  vectmp1long = gsl_vector_alloc (obsdata->numDataPts);/** scratch space **/
  vectmp2long = gsl_vector_alloc (obsdata->numDataPts);
  localbeta = gsl_vector_alloc (designmatrix->numparams);/** scratch space in later functions - excl. precision **/
  localbeta2 = gsl_vector_alloc (designmatrix->numparams+1);/** scratch space in later functions - excl. precision **/
  betafull = gsl_vector_alloc (designmatrix->numparams+1);/** */
  hessgvaluesfull = gsl_matrix_alloc (designmatrix->numparams+1,designmatrix->numparams+1); /**  */ 
  hessgvaluesfull3pt = gsl_matrix_alloc (designmatrix->numparams+1,designmatrix->numparams+1);
 
  myBeta = gsl_vector_alloc (designmatrix->numparams+1-1);/** inc rv precision : -1 as marginal calc */
  hessgvalues = gsl_matrix_alloc (designmatrix->numparams+1-1,designmatrix->numparams+1-1); /** -1 as marginal calc */ 
  hessgvalues3pt = gsl_matrix_alloc (designmatrix->numparams+1-1,designmatrix->numparams+1-1);
  /*dgvalues = gsl_vector_alloc (designmatrix->numparams+1-1);*//** inc rv precision : -1 as marginal calc */
  
  gparams.designdata=designmatrix;
  
   gparams.vectmp1=vectmp1;
   gparams.vectmp2=vectmp2;
   gparams.mattmp2=mattmp2;
   gparams.mattmp3=mattmp3;
   gparams.mattmp4=mattmp4;
   gparams.perm=initsperm;
   gparams.vectmp1long=vectmp1long;
   gparams.vectmp2long=vectmp2long;
   gparams.beta=localbeta;/** beta without precision */
   gparams.hessgvalues=hessgvaluesfull;
   gparams.hessgvalues3pt=hessgvaluesfull3pt;
   gparams.betafull=betafull;/** will hold the full beta inc. precision not just marginal */
   gparams.epsabs_inner=epsabs_inner;
   gparams.maxiters_inner=maxiters_inner;
   gparams.verbose=verbose;
   gparams.finitestepsize=finitestepsize;
   
   gparams.betafixed=0.0;/** these will be changed in loop below*/
   gparams.betaindex=paramid;/** this is fixed - the variable for which the posterior is calculated **/
   
   n=obsdata->numDataPts;
   m=designmatrix->numparams+1-1;/** inc precision, -1 marginal */
   
   perm = gsl_permutation_alloc (m);
   j=0;
      for(i=0;i<designmatrix->numparams+1;i++){if(i!= paramid){gsl_vector_set(myBeta,j++,denom_modes[i]);}} /** use modes as initial values **/     
  
   /*Rprintf("MODES: ");for(i=0;i<designmatrix->numparams;i++){Rprintf("= %f\n",gsl_vector_get(myBeta,i));}Rprintf("\nEND\n");*/
   
   status=GSL_SUCCESS;
   gparams.betafixed=betafixed;
  
     /*Rprintf("evaluating marginal at %f\n",gparams.betafixed);*/
     for(i=0;i<finitefactors->size;i++){/** go forwards through the factors so start with SMALLEST STEPSIZE */
   /*Rprintf("step size iteration %d\n",i);*/
     failcode=0;/** reset*/
    gparams.finitestepsize=gsl_vector_get(finitefactors,i)*finitestepsize;
    
      lbfgsb(nDim, lmm, myBeta->data, lowerbounds, upperbounds, nbd, &gvalue, &g_pois_outer_marg_R,
                      &rv_dg_pois_outer_marg_R, &failcode, 
	              &gparams,
	              factr,
                      pgtol, &fncount, &grcount,
                      maxiters, msg, trace, nREPORT);
		      
    if(!failcode){dag->nodeScoresErrCode[nodeid]=0;/*bestsize=gparams.finitestepsize;*/break;}
     }	    

if(failcode){Rprintf("%s at node %d\n",msg,nodeid+1);/** notify if there is an error and set final error code **/
		   } 		
/*Rprintf("MARGINAL gvalue=%f nodeid=%d\n",gvalue,nodeid+1);*/		
gparams.finitestepsize=finitestepsize;/** reset */
/*for(i=0;i<myBeta->size;i++){Rprintf("%f ",gsl_vector_get(myBeta,i));}Rprintf("\n");*/
/** just re-use as much of the existing gparams as possible - so some names are misleading, e.g. mattmp2 below actually holds the Hessian workspace */
   /*gparams.betaincTau=betafull;*/
   gparams.betastatic=myBeta;/** this is important as we are passing the address of myBeta and so don't want any other function changing this! **/
   gparams.nDim=n;
   gparams.mDim=m;
   gparams.perm=perm;
   gparams.mattmp2=hessgvalues;
   gparams.mattmp3=hessgvalues3pt;
   gparams.betafixed=betafixed;
   gparams.gvalue=gvalue;
   
    F.f = &compute_mlik_pois_marg_nm;
    F.params = &gparams;
    F.n = 1;
   
    T = gsl_multimin_fminimizer_nmsimplex2;
    s = gsl_multimin_fminimizer_alloc (T, 1);
   
    finitestepsize_vec = gsl_vector_alloc (1);
    gsl_vector_set (finitestepsize_vec, 0, h_guess);
    nmstepsize = gsl_vector_alloc (1);
    gsl_vector_set_all (nmstepsize, h_guess); 
    gsl_multimin_fminimizer_set (s, &F, finitestepsize_vec, nmstepsize);
    status = GSL_SUCCESS;
    
     iter=0;
   
    do
         {
           iter++;
           status = gsl_multimin_fminimizer_iterate (s);
     
           if (status) 
             break;
	   
	   nm_size = gsl_multimin_fminimizer_size (s);
           status = gsl_multimin_test_size (nm_size, h_epsabs);
     /*
           if (status == GSL_SUCCESS)
             {
               Rprintf ("converged to minimum at\n");
             }
     
           Rprintf ("iter=%5d error in mlik=%10.10e using fin.diff step= %10.10e\n", iter,s->fval,gsl_vector_get (s->x, 0));
    */
         }
       while (status == GSL_CONTINUE && iter < maxiters_hessian);
       if( (status != GSL_SUCCESS)){/*actual_status=status;*//** copy for use later **/
                                    status=GSL_FAILURE;} /** solution failed to achieve a value below h_epsabs **/                                                               
	 
    finitestepsize=gsl_vector_get(s->x,0);/** get best fin.diff stepsize **/
    
    /*dag->hessianError[nodeid]= s->fval;*//** get fin.diff error **/
    hessian_Error=s->fval;
    gsl_multimin_fminimizer_free (s);
    
 if(hessian_Error>max_hessian_error){Rprintf("Error in mlik = %e > tolerance of %e so continuing optimisation using Brent\n",hessian_Error,max_hessian_error); 
   
     /* Rprintf("stepsize after NM= %e\n",finitestepsize);*/
  
     T1 = gsl_min_fminimizer_brent;
     s1 = gsl_min_fminimizer_alloc (T1);
	 
      /** must find lower and upper such that f(lower) > f(x) < f(upper), i.e. a bracketing triple with an interior minimum **/ 
      /** use an interval of lower=finitestepsize/FACTOR, upper=finitestepsize*FACTOR and then start at the lower end and travel up until
      suitable endpoints are found - seems to work but is not especially fast! **/
      best_Error=hessian_Error;/** original error from Nelder-Mead */
      best_h=finitestepsize;               /** original stepsize from Nelder-Mead */
      found=0;/** flag for found good result */
      lower=finitestepsize/myfactor_brent;
      upper=myfactor_brent*finitestepsize;
      lower_f=compute_mlik_pois_marg_brent(lower, &gparams);/** value at lower point **/
      upper_f=compute_mlik_pois_marg_brent(upper, &gparams);/** value at higher point **/
      increLogscale=(gsl_sf_log(upper)-gsl_sf_log(lower))/num_intervals_brent;/** on a log scale */
      for(delta=gsl_sf_log(lower)+increLogscale;delta<gsl_sf_log(upper);delta+=increLogscale){/** linear increments on a log scale **/
	R_CheckUserInterrupt();/** allow an interrupt from the R console */ 
	/** find a point which has f(x) lower than f(lower) and f(upper) **/
	 new_f_min=compute_mlik_pois_marg_brent(gsl_sf_exp(delta), &gparams); 
	 Rprintf("lower=%e, delta=%e, upper=%e\n",lower,gsl_sf_exp(delta),upper);
        if(lower_f>new_f_min && new_f_min<upper_f  && get_best_stepsize_pois_marg(gsl_sf_exp(delta),lower,upper,maxiters_hessian_brent,&gparams, &compute_mlik_pois_marg_brent,
	                                                               s1,&finitestepsize,&hessian_Error)<=max_hessian_error){/** have an interval suitable for bracketing **/
	                                                           /** above is an address so we can store the error without rerunning the function */
	  /*finitestepsize=delta;*/
	  found=1;
	  status=GSL_SUCCESS;
	  break;/** break out of delta - so have found new x_min **/
	} else {/** have not got a good enough error but save the best error and stepsize so far found **/
	        if(hessian_Error<best_Error){best_Error=hessian_Error;
	                                                best_h=finitestepsize;}
	        }
      } /** end of search for interval and good error **/
         
      if(!found){/** have not found a suitably small error but may have found a better error than Nelder-Mead **/
       /** best_Error is either the original Nelder-Mead value or better, and best_h is the corresponding stepsize **/
	hessian_Error=best_Error;
	finitestepsize=best_h;/** reset back to the best estimate found **/
	status=GSL_FAILURE;/** set to failure since we did not achieve the lower error asked for */
	Rprintf("failed to meet tolerance of %e and using best error estimate found of %e\n",max_hessian_error,hessian_Error);}

    gsl_min_fminimizer_free (s1);
   
   } /** end of error being too large **/
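   /* Summary of the finite-difference stepsize selection above (as implemented):
      (1) lbfgsb finds the mode myBeta for the fixed betafixed value;
      (2) Nelder-Mead (gsl_multimin_fminimizer_nmsimplex2) minimises the error in
          the mlik as a function of the finite-difference stepsize h;
      (3) if that error still exceeds max_hessian_error, a log-spaced scan looks
          for a bracketing triple and Brent (gsl_min_fminimizer_brent) refines it.
      status records whether the requested tolerance was met. */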
   
       switch(status){  /** choose which type of node we have */
                     case GSL_SUCCESS:{    
		                     /** successful finite diff so now do final computation with the optimal step size **/
                                     /*Rprintf("search for optimal step size : status = %s at nodeid %d\n", gsl_strerror (status),nodeid+1);*/
                                     rv_hessg_pois_outer_marg(myBeta,&gparams, hessgvalues,finitestepsize,hessgvalues3pt);/** compute the Hessian using the optimal stepsize found above **/
                                    /* Rprintf("HESSIAN MARGINAL using stepsize =%e\n",finitestepsize);
				     for(i1=0;i1<hessgvalues->size1;i1++){
				        for(i2=0;i2<hessgvalues->size2;i2++){Rprintf("%e ",gsl_matrix_get(hessgvalues,i1,i2));}Rprintf("\n");}*/
				        
				     status=gsl_linalg_LU_decomp(hessgvalues,perm,&sss);
                                     mydet=gsl_linalg_LU_lndet(hessgvalues);/** compute the log determinant - this might be NaN (overflow?) */
                                     logscore= -n*gvalue-0.5*mydet+(m/2.0)*log((2.0*M_PI)/n);/** this is the final value */
                                     val=exp(logscore-mlik); /*Rprintf("f(node)=%f %f %f %f\n",val, mydet,logscore,mlik);  */ 
                                       *posterior=val;
		                      break;  
		     }
       
		     
		     case GSL_FAILURE: {/** the minimiser did not find a minimum meeting the accuracy requirements and so may be unreliable **/
		                       Rprintf ("-- ERROR! -- search for optimal step size error: status = %s at nodeid %d\n", gsl_strerror (status),nodeid+1);
                                       rv_hessg_pois_outer_marg(myBeta,&gparams, hessgvalues,finitestepsize,hessgvalues3pt);/** compute the Hessian using the best stepsize found **/
                                       /*Rprintf("HESSIAN MARGINAL using stepsize =%e\n",finitestepsize);
				       for(i1=0;i1<hessgvalues->size1;i1++){
				        for(i2=0;i2<hessgvalues->size2;i2++){Rprintf("%e ",gsl_matrix_get(hessgvalues,i1,i2));}Rprintf("\n");}*/
				        
				        status=gsl_linalg_LU_decomp(hessgvalues,perm,&sss);
                                       mydet=gsl_linalg_LU_lndet(hessgvalues);/** compute the log determinant - this might be NaN (overflow?) */
                                       logscore= -n*gvalue-0.5*mydet+(m/2.0)*log((2.0*M_PI)/n);/** this is the final value */
				       val=exp(logscore-mlik); /*Rprintf("f(node)=%f %f %f %f\n",val, mydet,logscore,mlik); */  
                                       *posterior=val;
		                       
				       break; 
		     }
		     
		     default:{Rprintf("got case %s\n",gsl_strerror (status)); error("in default switch in calc_poisson_marginal_rv_R() - should never get here!");}  
		     
          }

        
	
 /** now free up allocated memory **/
   for(i=0;i<designmatrix->numUnqGrps;i++){gsl_matrix_free(designmatrix->array_of_designs[i]);
                                           gsl_vector_free(designmatrix->array_of_Y[i]);}
   gsl_vector_free(designmatrix->priormean);
   gsl_vector_free(designmatrix->priorsd);
   gsl_vector_free(designmatrix->priorgamshape);
   gsl_vector_free(designmatrix->priorgamscale);
   gsl_vector_free(designmatrix->Y);
   gsl_matrix_free(designmatrix->datamatrix_noRV);
   /*gsl_vector_free(dgvalues);*/
   gsl_vector_free(myBeta); 
   gsl_vector_free(vectmp1);
   gsl_vector_free(vectmp2);
   gsl_matrix_free(mattmp2);
   gsl_matrix_free(mattmp3);
   gsl_matrix_free(mattmp4);
   gsl_permutation_free(initsperm);
   gsl_vector_free(vectmp1long);
   gsl_vector_free(vectmp2long);
   gsl_vector_free(localbeta);
   gsl_vector_free(localbeta2);
   gsl_vector_free(betafull);
   gsl_matrix_free(hessgvalues); 
   gsl_matrix_free(hessgvalues3pt);
   gsl_matrix_free(hessgvaluesfull);
   gsl_matrix_free(hessgvaluesfull3pt);
   gsl_permutation_free(perm);
   gsl_vector_free(finitefactors);
   gsl_vector_free(finitestepsize_vec);
   gsl_vector_free(nmstepsize);



}
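/* For reference - transcribing the node score computed in both switch branches
   above (this has the form of a Laplace approximation; notation taken directly
   from the code):

       logscore = -n * gvalue - 0.5 * ln|det H| + (m/2) * ln(2*pi / n)

   where gvalue is the objective returned by lbfgsb, H is the finite-difference
   Hessian whose log-determinant comes from gsl_linalg_LU_lndet, n is the number
   of data points and m the number of (marginal) parameters.  The quantity
   written to *posterior is then exp(logscore - mlik). */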
Exemple #29
0
int main( int argc, char* argv[]){
  int i;	
  
  
double x=0;
double step_1= 0.000001;
double step_2= 0.0005;	
double step_3= 0.002;
double step_4= 0.01;
double step_5= 7;

//int n=atoi(argv[1]);
int n=5600;
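// 500 + 2000 + 2000 + 1000 + 100 = 5600 points; with the step sizes above, the
// grid reaches x of roughly 500*1e-6 + 2000*5e-4 + 2000*2e-3 + 1000*1e-2 + 100*7 ~ 715.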
	
double* tab_x;
double* tab_F;
double* tab_logF;

FILE* fp;
//fp= fopen("/home/jacques/Bureau/synchrotron1.txt","w");
fp= fopen("/Users/scorde/Dropbox/SeB/Codes/sources/E200_scripts/jl_ana/synchrotron1bis.txt","w");
if(fp==NULL){
	printf("invalid path\n"); 
	return 0;}

tab_x= (double*) malloc(n*sizeof(double));
tab_F= (double*) malloc(n*sizeof(double));
tab_logF= (double*) malloc(n*sizeof(double));


for (i=0;i<500;i++){
	x= x + step_1;
	tab_x[i]=x;	
	tab_F[i]=gsl_sf_synchrotron_1(x);
	tab_logF[i]=gsl_sf_log(tab_F[i]);
			}

for (i=500;i<2500;i++){
	x= x + step_2;
	tab_x[i]=x;	
	tab_F[i]=gsl_sf_synchrotron_1(x);
	tab_logF[i]=gsl_sf_log(tab_F[i]);
			}

for (i=2500;i<4500;i++){
	x= x + step_3;
	tab_x[i]=x;	
	tab_F[i]=gsl_sf_synchrotron_1(x);
	tab_logF[i]=gsl_sf_log(tab_F[i]);
			}

for (i=4500;i<5500;i++){
	x= x + step_4;
	tab_x[i]=x;	
	tab_F[i]=gsl_sf_synchrotron_1(x);
	tab_logF[i]=gsl_sf_log(tab_F[i]);
			}

for (i=5500;i<5600;i++){
	x= x + step_5;
	tab_x[i]=x;	
	tab_F[i]=gsl_sf_synchrotron_1(x);
	tab_logF[i]=gsl_sf_log(tab_F[i]);
			}

    
for (i=0;i<n;i++){
	printf("%f ", tab_x[i]);
	fprintf(fp, "%f ", tab_x[i]);
	}

	printf("\n");
	fprintf(fp,"\n");

for (i=0;i<n;i++){
	printf("%f ", tab_F[i]);
	fprintf(fp, "%f ", tab_F[i]);
	}

	printf("\n");
	fprintf(fp,"\n");

for (i=0;i<n;i++){
	printf("%f ", tab_logF[i]);
	fprintf(fp, "%f ", tab_logF[i]);
	}
	
	printf("\n");
fclose(fp);
free(tab_x);
free(tab_F);
free(tab_logF);
	
  return 0;
}
Exemple #30
0
double I1(double y0, double y)
{
	double p1 = 0.5*(y0+y)*gsl_sf_log((1.0+exp((y0+y)/2.0))/(1-exp((-y0+y)/2))); 
	double p2 = -0.5*(y0-y)*gsl_sf_log((1.0+exp((y0-y)/2.0))/(1-exp((-y0-y)/2))); 
	return p1+p2+gsl_sf_dilog(-exp(y0/2.0+y/2.0))+gsl_sf_dilog(exp(-y0/2.0-y/2.0))-gsl_sf_dilog(-exp(y0/2.0-y/2.0))-gsl_sf_dilog(exp(-y0/2.0+y/2.0));
}
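/* Transcribing the expression above (Li2 denotes the dilogarithm gsl_sf_dilog):

     I1(y0, y) =  (y0+y)/2 * ln[ (1 + e^{(y0+y)/2}) / (1 - e^{(y-y0)/2}) ]
                - (y0-y)/2 * ln[ (1 + e^{(y0-y)/2}) / (1 - e^{-(y0+y)/2}) ]
                + Li2(-e^{(y0+y)/2}) + Li2(e^{-(y0+y)/2})
                - Li2(-e^{(y0-y)/2}) - Li2(e^{-(y0-y)/2})

   As written, the denominators 1 - e^{...} vanish when y = y0 or y = -y0, so
   the routine is only well defined away from those points. */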