Type objective_function<Type>::operator() ()
{
  // Data
  DATA_VECTOR( y_i );
  DATA_MATRIX( X_ij );

  // Parameters
  PARAMETER_VECTOR( b_j );
  PARAMETER_VECTOR( theta_z );

  // Objective function
  Type zero_prob = 1 / (1 + exp(-theta_z(0)));  // probability of a zero observation
  Type logsd = exp(theta_z(1));                 // note: despite its name, this is the SD (exp of the log-SD parameter)
  Type jnll = 0;
  int n_data = y_i.size();

  // Linear predictor
  vector<Type> linpred_i( n_data );
  linpred_i = X_ij * b_j;

  // Probability of data conditional on fixed effect values
  for( int i=0; i<n_data; i++){
    if(y_i(i)==0) jnll -= log( zero_prob );
    if(y_i(i)!=0) jnll -= log( 1-zero_prob ) + dlognorm( y_i(i), linpred_i(i), logsd, true );
  }
  
  // Reporting
  REPORT( zero_prob );
  REPORT( logsd );
  REPORT( linpred_i );
  return jnll;
}
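This example (and the last example in this listing) calls a user-defined dlognorm() helper that is not shown here. A minimal sketch, assuming the usual convention of evaluating the log-normal density via dnorm on the log scale, is:

template<class Type>
Type dlognorm(Type x, Type meanlog, Type sdlog, int give_log = 0)
{
  // log f(x) = log dnorm(log x; meanlog, sdlog) - log x  (Jacobian of the log transform)
  Type logres = dnorm(log(x), meanlog, sdlog, true) - log(x);
  if (give_log) return logres;
  return exp(logres);
}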
Example #2
File: socatt.cpp Project: GodinA/adcomp
Type objective_function<Type>::operator() ()
{
  
  DATA_FACTOR(y); //categorical response vector
  DATA_INTEGER(S); //number of response categories
  DATA_MATRIX(X); // Fixed effects design matrix
  DATA_FACTOR(group);
  PARAMETER_VECTOR(b); // Fixed effects
  PARAMETER(logsigma);
  PARAMETER_VECTOR(tmpk); // kappa (category thresholds)
  PARAMETER_VECTOR(u);    // Random effects
  Type sigma = exp(logsigma);
  vector<Type> alpha = tmpk;
  for(int s=1;s<tmpk.size();s++)
    alpha(s) = alpha(s-1) + exp(tmpk(s));
  Type ans=0;
  ans -= sum(dnorm(u,Type(0),Type(1),true));
  vector<Type> eta = X*b;
  for(int i=0; i<y.size(); i++){
    eta(i) += sigma*u(group(i));
    Type P;
    if(y(i)==(S-1)) P = 1.0; else P = Type(1)/(Type(1)+exp(-(alpha(y(i))-eta(i))));
    if(y(i)>0) P -= Type(1)/(Type(1)+exp(-(alpha(y(i)-1)-eta(i))));
    ans -= log(1.e-20+P);
  }
  
  return ans;
}
Example #3
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(obs);
  DATA_FACTOR(group);
  PARAMETER_VECTOR(mu);
  PARAMETER_VECTOR(sd);
  Type res=0;
  for(int i=0;i<obs.size();i++){
    res -= dnorm(obs[i],mu[group[i]],sd[group[i]],true);
  }
  return res;
}
Example #4
File: srw.cpp Project: DanOvando/Feb2016
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(observed);

  PARAMETER_VECTOR(population);
  PARAMETER(log_process_error);
  PARAMETER(log_obs_error);
  
  Type process_error = exp(log_process_error);
  Type obs_error     = exp(log_obs_error);
  
  int n_obs          = observed.size();  // number of observations
  
  Type nll           = 0; // negative log likelihood
  
  // likelihood for state transitions
  for(int y=1; y<n_obs; y++){
    Type m = population[y-1];
    nll   -= dnorm(population(y), m, process_error, true);
  }
  
  // likelihood for observations
  for(int y=0; y<n_obs; y++){
    nll -= dnorm(observed(y), population(y), obs_error, true);
  }
  
  ADREPORT(process_error);
  ADREPORT(obs_error);
  
  return nll;
}
Example #5
File: sumtest.cpp Project: GodinA/adcomp
Type objective_function<Type>::operator() ()
{
  PARAMETER_VECTOR(x);
  Type res=0;
  for(int i=0;i<x.size();i++)res+=x[i];
  res=res*res;
  return res;
}
Example #6
File: bym.cpp Project: rmp15/models
Type objective_function<Type>::operator() () {
	DATA_VECTOR(E);
	DATA_VECTOR(deaths);
	DATA_SPARSE_MATRIX(P); // precision matrix

	PARAMETER(alpha);
	PARAMETER(log_sigma2_V);
	PARAMETER_VECTOR(V);

	PARAMETER(log_sigma2_U);
	PARAMETER_VECTOR(W);

	int N = E.size();

	vector<Type> log_deaths_pred(N);
	vector<Type> mu(N);

	Type nll = 0;

	Type tau_V = 1 / exp(log_sigma2_V);
	Type tau_U = 1 / exp(log_sigma2_U);

	nll -= dnorm(alpha, Type(0), Type(10), 1);
	nll -= dgamma(tau_V, Type(0.5), Type(2000), 1);
	nll -= dgamma(tau_U, Type(0.5), Type(2000), 1);
	nll -= dnorm(V, Type(0), exp(0.5 * log_sigma2_V), 1).sum();

	vector<Type> tmp = P * W;
	nll -= -0.5 * (W * tmp).sum();
	vector<Type> U = W * exp(0.5 * log_sigma2_U);

	nll -= dnorm(U.sum(), Type(0), Type(0.00001), 1);
	for (int i = 0; i < N; i++) log_deaths_pred(i) = log(E(i)) + alpha + V(i) + U(i);
	for (int i = 0; i < N; i++) nll -= dpois(deaths(i), exp(log_deaths_pred(i)), 1);
	for (int i = 0; i < N; i++) mu(i) = exp(alpha + V(i) + U(i));

	vector<Type> deaths_pred = exp(log_deaths_pred);

	ADREPORT(U);
	ADREPORT(deaths_pred);
	ADREPORT(mu);

	return nll;
}
Example #7
File: ex1.cpp Project: yijay/TMB
Type objective_function<Type>::operator()()
{
 DATA_FACTOR(Sex);
 DATA_VECTOR(Age);
 DATA_VECTOR(Length);
 int n = Length.size();

 // These are the parameters (three are vectors; one is a scalar)
 PARAMETER_VECTOR(Linf);
 PARAMETER_VECTOR(Kappa);
 PARAMETER_VECTOR(t0);
 PARAMETER(LogSigma);
 Type Sigma = exp(LogSigma);
 vector<Type> LengthPred(n);

 // Provide the standard error of Sigma
 ADREPORT(Sigma);

 // Predictions and likelihoods
 for(int i=0;i<n;i++){
  Type Temp = Kappa(Sex(i))*(Age(i)-t0(Sex(i)));
  LengthPred(i) = Linf(Sex(i))*(1.0-exp(-Temp));
  }
 Type nll = -sum(dnorm(Length,LengthPred,Sigma,true));

 // Prediction for sex 1 and age 10
 Type Temp = Kappa(0)*(Type(10)-t0(0));
 Type PredLen10 = Linf(0)*(1.0-exp(-Temp));
 ADREPORT(PredLen10);

 // Predicted growth curve
 matrix<Type> LenPred(2,50);
 for (int Isex=0;Isex<2;Isex++)
  for (int Iage=1;Iage<=50;Iage++)
   {
   Temp = Kappa(Isex)*(Iage*1.0-t0(Isex));
   LenPred(Isex,Iage-1) = Linf(Isex)*(1.0-exp(-Temp));
   }
 REPORT(LenPred);

 return nll;
}
Example #8
File: wtmb.cpp Project: amart/Feb2016
Type objective_function<Type>::operator() ()
{
  DATA_ARRAY(wtage);
  DATA_ARRAY(wtcv);

  // matrix<Type> yfit(n);
  int nr = wtage.dim(0);
  int nc = wtage.dim(1);

  PARAMETER(log_sd_coh);
  PARAMETER(log_sd_yr );
  PARAMETER_VECTOR(mnwt );
  PARAMETER_VECTOR(coh_eff); //  (styr-nages-age_st+1,endyr-age_st+3,3);
  PARAMETER_VECTOR( yr_eff); //  yr_eff(styr,endyr+3,3);

  Type nll = 0.0;

  Type sigma_coh = exp(log_sd_coh);
  Type sigma_yr  = exp(log_sd_yr );
  // The source excerpt is truncated here; what follows is a minimal, assumed
  // completion so the function is well formed: predicted weight-at-age scaled
  // by the year effect, standard-normal random effects, and a return value.
  array<Type> wt_pre(nr, nc);
  for (int i = 0; i < nr; i++)
    for (int j = 0; j < nc; j++)
      wt_pre(i, j) = mnwt(j) * exp(sigma_yr * yr_eff(i));

  nll -= sum(dnorm(yr_eff,  Type(0), Type(1), true));
  nll -= sum(dnorm(coh_eff, Type(0), Type(1), true));

  REPORT(wt_pre);
  return nll;
}
Example #9
Type objective_function<Type>::operator() ()
{
  // Data
  DATA_INTEGER( n_data );
  DATA_INTEGER( n_factors );
  DATA_FACTOR( Factor );
  DATA_VECTOR( Y );
  DATA_VECTOR_INDICATOR( keep, Y );
  
  // Parameters
  PARAMETER( X0 );
  PARAMETER( log_SD0 );
  PARAMETER( log_SDZ );
  PARAMETER_VECTOR( Z );
  
  // Objective function
  Type jnll = 0;
  
  // Probability of data conditional on fixed and random effect values
  for( int i=0; i<n_data; i++){
    jnll -= dnorm( Y(i), X0 + Z(Factor(i)), exp(log_SD0), true );
  }
  
  // Probability of random coefficients
  for( int i=0; i<n_factors; i++){
    jnll -= dnorm( Z(i), Type(0.0), exp(log_SDZ), true );
  }
  
  // Reporting
  Type SDZ = exp(log_SDZ);
  Type SD0 = exp(log_SD0);
  ADREPORT( SDZ );
  REPORT( SDZ );
  ADREPORT( SD0 );
  REPORT( SD0 );
  ADREPORT( Z );
  REPORT( Z );
  ADREPORT( X0 );
  REPORT( X0 );
  
  // bias-correction testing
  Type MeanZ = Z.sum() / Z.size();
  Type SampleVarZ = ( (Z-MeanZ) * (Z-MeanZ) ).sum();
  Type SampleSDZ = pow( SampleVarZ + 1e-20, 0.5);
  REPORT( SampleVarZ );
  REPORT( SampleSDZ );  
  ADREPORT( SampleVarZ );
  ADREPORT( SampleSDZ );  
  

  return jnll;
}
Example #10
Type objective_function<Type>::operator() () {
// data:
DATA_MATRIX(x_ij);
DATA_VECTOR(y_i);
DATA_IVECTOR(k_i); // vector of IDs
DATA_INTEGER(n_k); // number of IDs

// parameters:
PARAMETER_VECTOR(b_j);
PARAMETER_VECTOR(sigma_j);
PARAMETER(log_b0_sigma);
PARAMETER_VECTOR(b0_k);

int n_data = y_i.size(); // get number of data points to loop over

// Linear predictor
vector<Type> linear_predictor_i(n_data);
vector<Type> linear_predictor_sigma_i(n_data);
linear_predictor_i = x_ij*b_j;
linear_predictor_sigma_i = sqrt(exp(x_ij*sigma_j));

Type nll = 0.0; // initialize negative log likelihood

for(int i = 0; i < n_data; i++){
  nll -= dnorm(y_i(i), b0_k(k_i(i)) + linear_predictor_i(i) , linear_predictor_sigma_i(i), true);
}
for(int k = 0; k < n_k; k++){
  nll -= dnorm(b0_k(k), Type(0.0), exp(log_b0_sigma), true);
}

REPORT( b0_k );
REPORT(b_j );

ADREPORT( b0_k );
ADREPORT( b_j );

return nll;
}
Example #11
Type objective_function<Type>::operator() () {
  // data:
  DATA_MATRIX(age);
  DATA_VECTOR(len);
  DATA_SCALAR(CV_e);
  DATA_INTEGER(num_reads);
  
  // parameters:
  PARAMETER(r0); // reference value
  PARAMETER(b); // growth displacement
  PARAMETER(k); // growth rate
  PARAMETER(m); // slope of growth
  PARAMETER(CV_Lt);
  
  PARAMETER(gam_shape);
  PARAMETER(gam_scale);
  
  PARAMETER_VECTOR(age_re);
  
  // procedures:
  int n = len.size();
  
  Type nll = 0.0; // Initialize negative log-likelihood
  
  Type eps = 1e-5;
  
  CV_e = CV_e < 0.05 ? 0.05 : CV_e;
  
  for (int i = 0; i < n; i++) {
    Type x = age_re(i);
    if (!isNA(x) && isFinite(x)) {
      Type len_pred = pow(r0 + b * exp(k * x), m);
      
      Type sigma_e = CV_e * x + eps;
      Type sigma_Lt = CV_Lt * (len_pred + eps);
      
      nll -= dnorm(len(i), len_pred, sigma_Lt, true);
      nll -= dgamma(x + eps, gam_shape, gam_scale, true);
      
      for (int j = 0; j < num_reads; j++) {
        if (!isNA(age(j, i)) && isFinite(age(j, i)) && age(j, i) >= 0) {
          nll -= dnorm(age(j, i), x, sigma_e, true); 
        }
      }  
    }
  }
  
  return nll;
}
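This example and the next (#12) call isNA() and isFinite() helpers that are defined elsewhere in their source files. A minimal sketch of such helpers, assuming the usual TMB idiom of inspecting the underlying double value (R_IsNA and R_finite are part of the R API), is:

template<class Type>
bool isNA(Type x) {
  // NA values passed from R arrive as NaN on the C++ side
  return R_IsNA(asDouble(x));
}

template<class Type>
bool isFinite(Type x) {
  // false for NA, NaN and +/-Inf
  return R_finite(asDouble(x));
}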
Example #12
Type objective_function<Type>::operator() () {
  // data:
  DATA_MATRIX(age);
  DATA_VECTOR(len);
  DATA_SCALAR(CV_e);
  DATA_INTEGER(num_reads);
  
  // parameters:
  PARAMETER(a); // upper asymptote
  PARAMETER(b); // growth range
  PARAMETER(k); // growth rate
  PARAMETER(CV_Lt);
  
  PARAMETER(beta);
  
  PARAMETER_VECTOR(age_re);
  
  // procedures:
  int n = len.size();
  
  Type nll = 0.0; // Initialize negative log-likelihood
  
  Type eps = 1e-5;

  
  CV_e = CV_e < 0.05 ? 0.05 : CV_e;
  
  for (int i = 0; i < n; i++) {
    Type x = age_re(i);
    if (!isNA(x) && isFinite(x)) {
      Type len_pred = a / (1 + b * exp(-k * x));
      
      Type sigma_e = CV_e * x + eps;
      Type sigma_Lt = CV_Lt * (len_pred + eps);
      
      nll -= dnorm(len(i), len_pred, sigma_Lt, true);
      nll -= dexp(x, beta, true);
      
      for (int j = 0; j < num_reads; j++) {
        if (!isNA(age(j, i)) && isFinite(age(j, i)) && age(j, i) >= 0) {
          nll -= dnorm(age(j, i), x, sigma_e, true); 
        }
      } 
    }
  }
  
  return nll;
}
Example #13
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(y);

  PARAMETER(phi);
  PARAMETER(shape1);
  PARAMETER(shape2);
  PARAMETER(sd);
  PARAMETER_VECTOR(u);

  Type res = 0;
  res += density::AR1(phi)(u);                     // AR1(...) returns the negative log density
  vector<Type> unif = pnorm(u, Type(0), Type(1));  // probability integral transform to U(0,1)
  vector<Type> x = qbeta(unif, shape1, shape2);    // map to Beta(shape1, shape2) margins
  res -= dnorm(y, x, sd, true).sum();
  return res;
}
Example #14
Type objective_function<Type>::operator()()
{
  /* Data section */
  DATA_VECTOR(Y);                  // Counted abundance
  DATA_VECTOR_INDICATOR(keep, Y);  // For one-step predictions

  /* Parameter section */
  PARAMETER_VECTOR(X);  // Latent states. At least as long as Y;
                        // extra elements are not used
  PARAMETER(logr);      // Growth rate
  PARAMETER(logtheta);  // With theta=1, the Ricker model
  PARAMETER(logK);      // Carrying capacity
  PARAMETER(logQ);      // Process noise
  PARAMETER(logS);      // Sample size controlling measurement noise

  /* Procedure section */

  Type r = exp(logr);
  Type theta = exp(logtheta);
  Type K = exp(logK);
  Type Q = exp(logQ);
  Type S = exp(logS);

  int timeSteps = Y.size();
  Type nll = 0;

  // Contributions from state transitions
  for (int i = 1; i < timeSteps; i++) {
    Type m = X[i - 1] + r * (1.0 - pow(exp(X[i - 1]) / K, theta));
    nll -= dnorm(X[i], m, sqrt(Q), true);
  }

  // Contributions from observations
  for (int i = 0; i < timeSteps; i++) {
    nll -= keep(i) * dpois(Y[i], S * exp(X[i]),
                           true);  // keep(i) for one-step predictions
  }

  return nll;
}
Example #15
Type objective_function<Type>::operator()()
{
  DATA_VECTOR(y);                  // Observations
  DATA_VECTOR_INDICATOR(keep, y);  // For one-step predictions

  DATA_SCALAR(huge);
  PARAMETER_VECTOR(x);
  PARAMETER(mu);
  PARAMETER(logsigma);
  PARAMETER(logs);

  // Initial condition
  Type nll = -dnorm(x(0), Type(0), huge, true);

  // Increments
  for (int i = 1; i < x.size(); ++i)
    nll -= dnorm(x(i), x(i - 1) + mu, exp(logsigma), true);

  // Observations
  for (int i = 0; i < y.size(); ++i)
    nll -= keep(i) * dnorm(y(i), x(i), exp(logs), true);

  return nll;
}
Example #16
File: track.cpp Project: cavios/tshydro
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(height);
  DATA_VECTOR(times);
  DATA_IVECTOR(timeidx);
  DATA_IARRAY(trackinfo);
  DATA_VECTOR(weights);

  PARAMETER(logSigma);
  PARAMETER(logSigmaRW);
  PARAMETER(logitp);
  PARAMETER_VECTOR(u);

  int timeSteps=times.size();
  int obsDim=height.size();
  int noTracks=trackinfo.dim[0];

  Type p=ilogit(logitp); 
  
  Type ans=0;
 
  Type sdRW=exp(logSigmaRW);
  for(int i=1;i<timeSteps;i++)
    ans += -dnorm(u(i),u(i-1),sdRW*sqrt(times(i)-times(i-1)),true); 

  Type sdObs=exp(logSigma);
  for(int t=0;t<noTracks;t++){
    vector<Type> sub=height.segment(trackinfo(t,0),trackinfo(t,2));
    vector<Type> subw=weights.segment(trackinfo(t,0),trackinfo(t,2));
    for(int i=0;i<trackinfo(t,2);i++){
      ans += nldens(sub(i),u(timeidx(trackinfo(t,0))-1),sdObs/sqrt(subw(i)),p);
    } 
  }

  return ans;
}
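Example #16 relies on two helpers not shown in this excerpt: ilogit() (inverse-logit) and nldens() (the per-observation negative log-density with outlier probability p), both defined elsewhere in the source file. A minimal sketch of the inverse-logit helper, as an assumption about how it is defined upstream, is:

template<class Type>
Type ilogit(Type x)
{
  // inverse logit: maps the real line onto (0, 1)
  return Type(1) / (Type(1) + exp(-x));
}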
Example #17
Type objective_function<Type>::operator() ()
{
  DATA_VECTOR(y);
  DATA_MATRIX(X);
  DATA_MATRIX(dd);
  PARAMETER_VECTOR(b);
  PARAMETER(a);
  PARAMETER(log_sigma);
  int n = dd.rows();

  // Construct joint negative log-likelihood
  joint_nll<Type> jnll(y, X, dd, b, a, log_sigma);

  // Random effect initial guess
  vector<Type> u(n);
  u.setZero();

  // Calculate Laplace approx (updates u)
  DATA_INTEGER(niter);
  Type res = laplace(jnll, u, niter);
  ADREPORT(u);

  return res;
}
Example #18
Type objective_function<Type>::operator() () {
// data:
DATA_MATRIX(x_ij); // fixed effect model matrix
DATA_MATRIX(x_sigma_ij); // fixed effect model matrix
DATA_VECTOR(y_i); // response vector
DATA_IVECTOR(pholder_i); // vector of IDs for permit holder
DATA_IVECTOR(strategy_i); // vector of IDs for strategy
DATA_INTEGER(n_pholder); // number of IDs for pholder
DATA_INTEGER(n_strategy); // number of IDs for strategy
DATA_INTEGER(diversity_column); // fixed effect column position of diversity
DATA_VECTOR(b1_cov_re_i); // predictor data for random slope
DATA_VECTOR(b2_cov_re_i); // predictor data for random slope
/* DATA_VECTOR(b3_cov_re_i); // predictor data for random slope */
DATA_VECTOR(g1_cov_re_i); // predictor data for random slope
DATA_IVECTOR(spec_div_all_1); // indicator for if there is variability in diversity

// parameters:
PARAMETER_VECTOR(b_j);
PARAMETER_VECTOR(sigma_j);
PARAMETER(log_b0_pholder_tau);
// PARAMETER(log_b1_pholder_tau);
PARAMETER(log_b0_strategy_tau);
PARAMETER(log_b1_strategy_tau);
PARAMETER(log_b2_strategy_tau);
/* PARAMETER(log_b3_strategy_tau); */
PARAMETER_VECTOR(b0_pholder);
// PARAMETER_VECTOR(b1_pholder);
PARAMETER_VECTOR(b0_strategy);
PARAMETER_VECTOR(b1_strategy);
PARAMETER_VECTOR(b2_strategy);
/* PARAMETER_VECTOR(b3_strategy); */

//PARAMETER(log_g0_pholder_tau);
PARAMETER(log_g0_strategy_tau);
PARAMETER(log_g1_strategy_tau);
// PARAMETER_VECTOR(g0_pholder);
PARAMETER_VECTOR(g0_strategy);
PARAMETER_VECTOR(g1_strategy);

int n_data = y_i.size();

// Linear predictor
vector<Type> linear_predictor_i(n_data);
vector<Type> linear_predictor_sigma_i(n_data);
vector<Type> eta(n_data);
vector<Type> eta_sigma(n_data);
linear_predictor_i = x_ij*b_j;
linear_predictor_sigma_i = x_sigma_ij*sigma_j;

/* // set slope deviations that we can't estimate to 0: */
/* for(int i = 0; i < n_data; i++){ */
/*   if(spec_div_all_1(strategy_i(i)) == 1) { */
/*     b1_strategy(strategy_i(i)) = 0; */
/*     g1_strategy(strategy_i(i)) = 0; */
/*   } */
/* } */

Type nll = 0.0; // initialize negative log likelihood

for(int i = 0; i < n_data; i++){

  eta(i) = b0_pholder(pholder_i(i)) +
      // b1_pholder(pholder_i(i)) * b1_cov_re_i(i) +
      b0_strategy(strategy_i(i)) +
      b1_strategy(strategy_i(i)) * b1_cov_re_i(i) +
      b2_strategy(strategy_i(i)) * b2_cov_re_i(i) +
      /* b3_strategy(strategy_i(i)) * b3_cov_re_i(i) + */
      linear_predictor_i(i);

  eta_sigma(i) = sqrt(exp(
          // g0_pholder(pholder_i(i)) +
          g0_strategy(strategy_i(i)) +
          g1_strategy(strategy_i(i)) * g1_cov_re_i(i) +
          linear_predictor_sigma_i(i)));

  nll -= dnorm(y_i(i), eta(i), eta_sigma(i), true);
}

for(int k = 0; k < n_pholder; k++){
  nll -= dnorm(b0_pholder(k), Type(0.0), exp(log_b0_pholder_tau), true);
  // nll -= dnorm(g0_pholder(k), Type(0.0), exp(log_g0_pholder_tau), true);
  // nll -= dnorm(b1_pholder(k), Type(0.0), exp(log_b1_pholder_tau), true);
}

for(int k = 0; k < n_strategy; k++){
  nll -= dnorm(b0_strategy(k), Type(0.0), exp(log_b0_strategy_tau), true);
  nll -= dnorm(g0_strategy(k), Type(0.0), exp(log_g0_strategy_tau), true);

  // only include these species diversity slope deviations
  // if there was sufficient variation in species diversity
  // to estimate them:
  if(spec_div_all_1(k) == 0) {
    nll -= dnorm(b1_strategy(k), Type(0.0), exp(log_b1_strategy_tau), true);
    nll -= dnorm(g1_strategy(k), Type(0.0), exp(log_g1_strategy_tau), true);
  }

  nll -= dnorm(b2_strategy(k), Type(0.0), exp(log_b2_strategy_tau), true);
  /* nll -= dnorm(b3_strategy(k), Type(0.0), exp(log_b3_strategy_tau), true); */
}

// Reporting
/* Type b0_pholder_tau = exp(log_b0_pholder_tau); */
/* Type b0_strategy_tau = exp(log_b0_strategy_tau); */
// Type b1_tau = exp(log_b1_tau);
/* Type g0_pholder_tau = exp(log_g0_pholder_tau); */
/* Type g0_strategy_tau = exp(log_g0_strategy_tau); */
// Type g1_tau = exp(log_g1_tau);

vector<Type> combined_b1_strategy(n_strategy);
vector<Type> combined_g1_strategy(n_strategy);
for(int k = 0; k < n_strategy; k++){
  // these are fixed-effect slopes + random-effect slopes
  combined_b1_strategy(k) = b_j(diversity_column) + b1_strategy(k);
  combined_g1_strategy(k) = sigma_j(diversity_column) + g1_strategy(k);
}

/* REPORT(b0_pholder); */
REPORT(b0_strategy);
REPORT(eta);
REPORT(b1_strategy);
REPORT(b_j);
/* REPORT(g0_pholder); */
REPORT(g0_strategy);
REPORT(g1_strategy);
/* REPORT(b0_tau); */
// REPORT(b1_tau);
/* REPORT(g0_tau); */
// REPORT(g1_tau);
REPORT(combined_b1_strategy);
REPORT(combined_g1_strategy);

// /* ADREPORT(b0_pholder); */
// ADREPORT(b0_strategy);
// ADREPORT(b1_strategy);
// ADREPORT(b_j);
// /* ADREPORT(g0_pholder); */
// ADREPORT(g0_strategy);
// ADREPORT(g1_strategy);
// /* ADREPORT(b0_tau); */
// ADREPORT(b1_tau);
// /* ADREPORT(g0_tau); */
// ADREPORT(g1_tau);
ADREPORT(combined_b1_strategy);
ADREPORT(combined_g1_strategy);

return nll;
}
Example #19
File: poll.cpp Project: DanOvando/Feb2016
Type objective_function<Type>::operator() ()
{
  DATA_INTEGER(minAge);         
  DATA_INTEGER(maxAge);         
  DATA_INTEGER(minYear);        
  DATA_INTEGER(maxYear);        
  DATA_ARRAY(catchNo);        
  DATA_ARRAY(stockMeanWeight);
  DATA_ARRAY(propMature);     
  DATA_ARRAY(M);              
  DATA_INTEGER(minAgeS);        
  DATA_INTEGER(maxAgeS);        
  DATA_INTEGER(minYearS);       
  DATA_INTEGER(maxYearS);       
  DATA_SCALAR(surveyTime);     
  DATA_ARRAY(Q1);  

  PARAMETER_VECTOR(logN1Y);
  PARAMETER_VECTOR(logN1A);
  PARAMETER_VECTOR(logFY);
  PARAMETER_VECTOR(logFA);
  PARAMETER_VECTOR(logVarLogCatch);
  PARAMETER_VECTOR(logQ);
  PARAMETER(logVarLogSurvey);  

  int na=maxAge-minAge+1;
  int ny=maxYear-minYear+1;
  int nas=maxAgeS-minAgeS+1;
  int nys=maxYearS-minYearS+1;

  // setup F
  matrix<Type> F(ny,na);
  for(int y=0; y<ny; ++y){
    for(int a=0; a<na; ++a){
      F(y,a)=exp(logFY(y))*exp(logFA(a));
    }
  }
  // setup logN
  matrix<Type> logN(ny,na);
  for(int a=0; a<na; ++a){
    logN(0,a)=logN1Y(a);
  } 
  for(int y=1; y<ny; ++y){
    logN(y,0)=logN1A(y-1);
    for(int a=1; a<na; ++a){
      logN(y,a)=logN(y-1,a-1)-F(y-1,a-1)-M(y-1,a-1);
      if(a==(na-1)){
        logN(y,a)=log(exp(logN(y,a))+exp(logN(y,a-1)-F(y-1,a)-M(y-1,a)));
      }
    }
  }
  matrix<Type> predLogC(ny,na);
  for(int y=0; y<ny; ++y){
    for(int a=0; a<na; ++a){
      predLogC(y,a)=log(F(y,a))-log(F(y,a)+M(y,a))+log(Type(1.0)-exp(-F(y,a)-M(y,a)))+logN(y,a);
    }
  }

  Type ans=0; 
  for(int y=0; y<ny; ++y){
    for(int a=0; a<na; ++a){
      if(a==0){
        ans+= -dnorm(log(catchNo(y,a)),predLogC(y,a),exp(Type(0.5)*logVarLogCatch(0)),true);
      }else{
        ans+= -dnorm(log(catchNo(y,a)),predLogC(y,a),exp(Type(0.5)*logVarLogCatch(1)),true);
      }
    }
  }

  matrix<Type> predLogS(nys,nas);
  for(int y=0; y<nys; ++y){
    for(int a=0; a<nas; ++a){
      int sa        = a+(minAgeS-minAge);
      int sy        = y+(minYearS-minYear);
      predLogS(y,a) = logQ(a)-(F(sy,sa)+M(sy,sa))*surveyTime+logN(sy,sa);
      ans          += -dnorm(log(Q1(y,a)),predLogS(y,a),exp(Type(0.5)*logVarLogSurvey),true);
    }
  }

  vector<Type> ssb(ny);
  ssb.setZero();
  for(int y=0; y<ny; ++y){
    for(int a=0; a<na; ++a){
      ssb(y)+=exp(logN(y,a))*stockMeanWeight(y,a)*propMature(y,a);
    }
  }

  ADREPORT(ssb);
  return ans;
}
Example #20
Type objective_function<Type>::operator() () {
// data:
DATA_MATRIX(x_ij);
DATA_VECTOR(y_i);
DATA_IVECTOR(k_i); // vector of IDs
DATA_INTEGER(n_k); // number of IDs
DATA_INTEGER(n_j); // column index of the focal slope in b_j / sigma_j (see b1_b1_k below)
DATA_VECTOR(b1_cov_re_i); // predictor data for random slope
DATA_VECTOR(sigma1_cov_re_i); // predictor data for random slope
//DATA_VECTOR(sigma2_cov_re_i); // predictor data for random slope

// parameters:
PARAMETER_VECTOR(b_j);
PARAMETER_VECTOR(sigma_j);
PARAMETER(log_b0_sigma);
PARAMETER_VECTOR(b0_k);
PARAMETER(log_b1_sigma);
PARAMETER_VECTOR(b1_k);
PARAMETER(log_sigma0_sigma);
PARAMETER(log_sigma1_sigma);
PARAMETER_VECTOR(sigma0_k);
PARAMETER_VECTOR(sigma1_k);

int n_data = y_i.size(); // get number of data points to loop over

// Linear predictor
vector<Type> linear_predictor_i(n_data);
vector<Type> linear_predictor_sigma_i(n_data);
linear_predictor_i = x_ij*b_j;
linear_predictor_sigma_i = x_ij*sigma_j;

Type nll = 0.0; // initialize negative log likelihood

for(int i = 0; i < n_data; i++){
  nll -= dnorm(
      y_i(i),

      b0_k(k_i(i)) + b1_k(k_i(i)) * b1_cov_re_i(i) +
      linear_predictor_i(i),

      sqrt(exp(
          sigma0_k(k_i(i)) +
          sigma1_k(k_i(i)) * sigma1_cov_re_i(i) +
          linear_predictor_sigma_i(i))),

      true);
}
for(int k = 0; k < n_k; k++){
  nll -= dnorm(b0_k(k), Type(0.0), exp(log_b0_sigma), true);
  nll -= dnorm(b1_k(k), Type(0.0), exp(log_b1_sigma), true);
  nll -= dnorm(sigma0_k(k), Type(0.0), exp(log_sigma0_sigma), true);
  nll -= dnorm(sigma1_k(k), Type(0.0), exp(log_sigma1_sigma), true);
  //nll -= dnorm(sigma2_k(k), Type(0.0), exp(log_sigma2_sigma), true);
}

// Reporting
Type b0_sigma = exp(log_b0_sigma);
Type b1_sigma = exp(log_b1_sigma);
Type sigma0_sigma = exp(log_sigma0_sigma);
Type sigma1_sigma = exp(log_sigma1_sigma);
//Type sigma2_sigma = exp(log_sigma2_sigma);

vector<Type> b1_b1_k(n_k);
vector<Type> sigma1_sigma1_k(n_k);
for(int k = 0; k < n_k; k++){
  // these are fixed-effect slopes + random-effect slopes
  b1_b1_k(k) = b_j(n_j) + b1_k(k);
  sigma1_sigma1_k(k) = sigma_j(n_j) + sigma1_k(k);
}

REPORT( b0_k );
REPORT( b1_k );
REPORT( b_j );
REPORT( sigma0_k );
REPORT( sigma1_k );
//REPORT( sigma2_k );
REPORT(b0_sigma);
REPORT(b1_sigma);
REPORT(sigma0_sigma);
REPORT(sigma1_sigma);
//REPORT(sigma2_sigma);
REPORT(b1_b1_k);
REPORT(sigma1_sigma1_k);

//ADREPORT( b0_k );
//ADREPORT( b1_k );
//ADREPORT( b_j );
//ADREPORT( sigma0_k );
//ADREPORT( sigma1_k );
//ADREPORT( sigma2_k );
//ADREPORT(b0_sigma);
//ADREPORT(b1_sigma);
//ADREPORT(sigma0_sigma);
//ADREPORT(sigma1_sigma);
//ADREPORT(sigma2_sigma);
//ADREPORT(b1_b1_k);
//ADREPORT(sigma1_sigma1_k);

return nll;
}
Example #21
Type objective_function<Type>::operator() ()
{
  DATA_STRING(distr);
  DATA_INTEGER(n);
  Type ans = 0;

  if (distr == "norm") {
    PARAMETER(mu);
    PARAMETER(sd);
    vector<Type> x = rnorm(n, mu, sd);
    ans -= dnorm(x, mu, sd, true).sum();
  }
  else if (distr == "gamma") {
    PARAMETER(shape);
    PARAMETER(scale);
    vector<Type> x = rgamma(n, shape, scale);
    ans -= dgamma(x, shape, scale, true).sum();
  }
  else if (distr == "pois") {
    PARAMETER(lambda);
    vector<Type> x = rpois(n, lambda);
    ans -= dpois(x, lambda, true).sum();
  }
  else if (distr == "compois") {
    PARAMETER(mode);
    PARAMETER(nu);
    vector<Type> x = rcompois(n, mode, nu);
    ans -= dcompois(x, mode, nu, true).sum();
  }
  else if (distr == "compois2") {
    PARAMETER(mean);
    PARAMETER(nu);
    vector<Type> x = rcompois2(n, mean, nu);
    ans -= dcompois2(x, mean, nu, true).sum();
  }
  else if (distr == "nbinom") {
    PARAMETER(size);
    PARAMETER(prob);
    vector<Type> x = rnbinom(n, size, prob);
    ans -= dnbinom(x, size, prob, true).sum();
  }
  else if (distr == "nbinom2") {
    PARAMETER(mu);
    PARAMETER(var);
    vector<Type> x = rnbinom2(n, mu, var);
    ans -= dnbinom2(x, mu, var, true).sum();
  }
  else if (distr == "exp") {
    PARAMETER(rate);
    vector<Type> x = rexp(n, rate);
    ans -= dexp(x, rate, true).sum();
  }
  else if (distr == "beta") {
    PARAMETER(shape1);
    PARAMETER(shape2);
    vector<Type> x = rbeta(n, shape1, shape2);
    ans -= dbeta(x, shape1, shape2, true).sum();
  }
  else if (distr == "f") {
    PARAMETER(df1);
    PARAMETER(df2);
    vector<Type> x = rf(n, df1, df2);
    ans -= df(x, df1, df2, true).sum();
  }
  else if (distr == "logis") {
    PARAMETER(location);
    PARAMETER(scale);
    vector<Type> x = rlogis(n, location, scale);
    ans -= dlogis(x, location, scale, true).sum();
  }
  else if (distr == "t") {
    PARAMETER(df);
    vector<Type> x = rt(n, df);
    ans -= dt(x, df, true).sum();
  }
  else if (distr == "weibull") {
    PARAMETER(shape);
    PARAMETER(scale);
    vector<Type> x = rweibull(n, shape, scale);
    ans -= dweibull(x, shape, scale, true).sum();
  }
  else if (distr == "AR1") {
    PARAMETER(phi);
    vector<Type> x(n);
    density::AR1(phi).simulate(x);
    ans += density::AR1(phi)(x);
  }
  else if (distr == "ARk") {
    PARAMETER_VECTOR(phi);
    vector<Type> x(n);
    density::ARk(phi).simulate(x);
    ans += density::ARk(phi)(x);
  }
  else if (distr == "MVNORM") {
    PARAMETER(phi);
    matrix<Type> Sigma(5,5);
    for(int i=0; i<Sigma.rows(); i++)
      for(int j=0; j<Sigma.rows(); j++)
        Sigma(i,j) = exp( -phi * abs(i - j) );
    density::MVNORM_t<Type> nldens = density::MVNORM(Sigma);
    for(int i = 0; i<n; i++) {
      vector<Type> x = nldens.simulate();
      ans += nldens(x);
    }
  }
  else if (distr == "SEPARABLE") {
    PARAMETER(phi1);
    PARAMETER_VECTOR(phi2);
    array<Type> x(100, 200);
    SEPARABLE( density::ARk(phi2), density::AR1(phi1) ).simulate(x);
    ans += SEPARABLE( density::ARk(phi2), density::AR1(phi1) )(x);
  }
  else if (distr == "GMRF") {
    PARAMETER(delta);
    matrix<Type> Q0(5, 5);
    Q0 <<
      1,-1, 0, 0, 0,
     -1, 2,-1, 0, 0,
      0,-1, 2,-1, 0,
      0, 0,-1, 2,-1,
      0, 0, 0,-1, 1;
    Q0.diagonal().array() += delta;
    Eigen::SparseMatrix<Type> Q = asSparseMatrix(Q0);
    vector<Type> x(5);
    for(int i = 0; i<n; i++) {
      density::GMRF(Q).simulate(x);
      ans += density::GMRF(Q)(x);
    }
  }
  else if (distr == "SEPARABLE_NESTED") {
    PARAMETER(phi1);
    PARAMETER(phi2);
    PARAMETER(delta);
    matrix<Type> Q0(5, 5);
    Q0 <<
      1,-1, 0, 0, 0,
     -1, 2,-1, 0, 0,
      0,-1, 2,-1, 0,
      0, 0,-1, 2,-1,
      0, 0, 0,-1, 1;
    Q0.diagonal().array() += delta;
    Eigen::SparseMatrix<Type> Q = asSparseMatrix(Q0);
    array<Type> x(5, 6, 7);
    for(int i = 0; i<n; i++) {
      SEPARABLE(density::AR1(phi2),
                SEPARABLE(density::AR1(phi1),
                          density::GMRF(Q) ) ).simulate(x);
      ans += SEPARABLE(density::AR1(phi2),
                       SEPARABLE(density::AR1(phi1),
                                 density::GMRF(Q) ) )(x);
    }
  }
  else error( ("Invalid distribution '" + distr + "'").c_str() );
  return ans;
}
Type objective_function<Type>::operator() ()
{
  // Data
  DATA_INTEGER( like ); // define likelihood type, 1==delta lognormal, 2==delta gamma
  DATA_VECTOR( y_i ); // observations
  DATA_MATRIX( X_ij ); // covariate design matrix
  DATA_VECTOR( include ); // 0 == include in NLL, 1 == exclude from NLL (held out for prediction)

  // Parameters
  PARAMETER_VECTOR( b_j ); // betas to generate expected values
  PARAMETER_VECTOR( theta_z ); // variances

  // Transformations
  Type zero_prob = 1 / (1 + exp(-theta_z(0)));
  Type sd = exp(theta_z(1)); //standard deviation (lognormal), scale parameter theta (gamma)
  int n_data = y_i.size();

  Type jnll = 0;
  Type pred_jnll = 0;
  vector<Type> jnll_i(n_data);
  jnll_i.setZero();


  // linear predictor
  vector<Type> logpred_i( n_data );
  logpred_i = X_ij * b_j; 


  // Delta lognormal
  if(like==1){
  	for( int i=0; i<n_data; i++){
      if(y_i(i)==0) jnll_i(i) -= log( zero_prob );
      if(y_i(i)!=0) jnll_i(i) -= log( 1-zero_prob ) + dlognorm( y_i(i), logpred_i(i), sd, true );
    
      // Running counter
      if( include(i)==0 ) jnll += jnll_i(i);
      if( include(i)==1 ) pred_jnll += jnll_i(i);
    }
  }

  // Delta gamma
  if(like==2){
    for(int i=0; i<n_data; i++){
    	if(y_i(i)==0) jnll_i(i) -= log( zero_prob );
    	if(y_i(i)!=0) jnll_i(i) -= log( 1-zero_prob ) + dgamma( y_i(i), pow(sd,-2), exp(logpred_i(i))*pow(sd,2), true );

        // Running counter
        if( include(i)==0 ) jnll += jnll_i(i);
        if( include(i)==1 ) pred_jnll += jnll_i(i);
    }
  }



  
  // Reporting
  REPORT( zero_prob );
  REPORT( sd );
  REPORT( logpred_i );
  REPORT( b_j );
  REPORT( pred_jnll );
  REPORT( jnll_i );
  return jnll;
}