Example #1
/**
 * Elementwise sfabs (smooth absolute value) applied to a dvar_vector.
 * \param t1 a dvar_vector
 * \return a dvar_vector whose i-th element is sfabs(t1(i))
 */
dvar_vector sfabs(const dvar_vector& t1)
  {
     RETURN_ARRAYS_INCREMENT();

     dvar_vector tmp(t1.indexmin(),t1.indexmax());

     for (int i=t1.indexmin(); i<=t1.indexmax(); i++)
     {
       tmp.elem(i)=sfabs(t1.elem(i));
     }
     RETURN_ARRAYS_DECREMENT();
     return(tmp);
  }
Example #2
/** Compute the dot product of two variable type vectors. The minimum and maximum
  legal subscripts of the arguments must agree; otherwise an error message
  is printed and execution terminates.
  \ingroup matop
  \param v1 A dvar_vector, \f$a\f$.
  \param v2 A dvar_vector, \f$b\f$.
  \return A dvariable, \f$z = a\cdot b = \sum_i a_i\cdot b_i\f$  containing
  the value of the dot product of the two arguments.
*/
dvariable operator*(const dvar_vector& v1, const dvar_vector& v2)
{
    RETURN_ARRAYS_INCREMENT();
    if (v1.indexmin()!=v2.indexmin()||v1.indexmax()!=v2.indexmax())
    {
        cerr << "Incompatible bounds in "
             "prevariable operator * (const dvar_vector& v1, const dvar_vector& v2)"
             << endl;
        ad_exit(1);
    }
    double tmp=0;

#ifndef USE_ASSEMBLER
    int mmin=v1.indexmin();
    int mmax=v1.indexmax();
#ifdef OPT_LIB
    double * pt1=&v1.elem_value(mmin);
    double * pt1m=&v1.elem_value(mmax);
    double * pt2=&v2.elem_value(mmin);
    do
    {
        tmp+= *pt1++ * *pt2++;
    }
    while (pt1<=pt1m);
#else
    for (int i=mmin; i<=mmax; i++)
    {
        tmp+=v1.elem_value(i)*v2.elem_value(i);
    }
#endif
#else
    int mmin=v1.indexmin();
    int n=v1.indexmax()-mmin+1;
    dp_dotproduct(&tmp,&(v1.elem_value(mmin)),&(v2.elem_value(mmin)),n);
#endif

    dvariable vtmp=nograd_assign(tmp);

    // The derivative list considerations
    save_identifier_string("bbbb");
    v1.save_dvar_vector_value();
    v1.save_dvar_vector_position();
    v2.save_dvar_vector_value();
    v2.save_dvar_vector_position();
    vtmp.save_prevariable_position();
    save_identifier_string("aaaa");
    gradient_structure::GRAD_STACK1->
    set_gradient_stack(dvdv_dot);
    RETURN_ARRAYS_DECREMENT();
    return vtmp;
}
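A minimal usage sketch (not part of the library source), assuming an ADMB context in which a gradient_structure object already exists; the bounds and values below are made up for illustration:

  dvar_vector a(1,3);
  dvar_vector b(1,3);
  a(1) = 1.0; a(2) = 2.0; a(3) = 3.0;
  b(1) = 4.0; b(2) = 5.0; b(3) = 6.0;
  dvariable z = a * b;       // dot product: 1*4 + 2*5 + 3*6 = 32
  cout << value(z) << endl;  // prints the value of the dot product

If the index ranges of a and b differed, the operator would print the "Incompatible bounds" message above and terminate via ad_exit(1).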
Example #3
/**
 * @brief Get initial vectors of newshell and oldshell crabs at equilibrium
 * @ingroup GMACS
 * @authors Steve Martell and John Levitt
 * @date Jan 3, 2015.
 * 
 * @param[out] n vector of numbers at length in new shell condition
 * @param[out] o vector of numbers of old shell crabs at length
 * @param[in] A size transition matrix
 * @param[in] S diagonal matrix of length specific survival rates
 * @param[in] P diagonal matrix of length specific molting probabilities
 * @param[in] r vector of new recruits at length.
 * 
 * @details 
 * Jan 3, 2015.  Working with John Levitt on an analytical solution instead of
 * the numerical approach.  We think we have a solution.
 *
 * Notation: \n
 * \f$n\f$ = vector of newshell crabs \n
 * \f$o\f$ = vector of oldshell crabs \n
 * \f$P\f$ = diagonal matrix of molting probabilities by size \n
 * \f$S\f$ = diagonal matrix of survival rates by size \n
 * \f$A\f$ = Size transition matrix \n
 * \f$r\f$ = vector of new recruits (newshell) \n
 * \f$I\f$ = identity matrix. \n
 *
 * 	
 * The following equations represent the dynamics of newshell \a n and oldshell \a o crabs.
 * \f{align*}{
 *   n &= nSPA + oSPA + r \\
 *   o &= oS(I-P) + nS(I-P)
 * \f}
 * The objective is to solve the above equations for \f$n\f$ and \f$o\f$, respectively.
 * First, let's solve the second equation for \f$o\f$:
 * \f{align*}{
 *   o &= n(I-P)S[I-(I-P)S]^{-1}
 * \f}
 * Next, substitute the above expression into the first equation and solve for \f$n\f$:
 * \f{align*}{
 *   n &= nPSA + n(I-P)S[I-(I-P)S]^{-1}PSA + r \\
 *   \mbox{let} \quad \beta &= [I-(I-P)S]^{-1}, \\
 *   r &= n - nPSA - n(I-P)S \beta PSA \\
 *   r &= n(I - PSA - (I-P)S \beta PSA) \\
 *   \mbox{let} \quad C &= I - PSA - (I-P)S \beta PSA, \\
 *   n &= C^{-1} r
 * \f}
 * Note that \f$C\f$ must be invertible to solve for the equilibrium solution for
 * \f$n\f$, so the diagonal elements of \f$P\f$ and \f$S\f$ must be positive
 * non-zero numbers.
 */
void calc_equilibrium(dvar_vector& n,
                      dvar_vector& o,
                      const dvar_matrix& A,
                      const dvar_matrix& S,
                      const dvar_matrix& P,
                      const dvar_vector& r)
{
	int nclass = n.indexmax();
	dmatrix Id = identity_matrix(1,nclass);
	dvar_matrix B(1,nclass,1,nclass);
	dvar_matrix C(1,nclass,1,nclass);
	dvar_matrix D(1,nclass,1,nclass);

	B = inv(Id - (Id-P)*S);
	C = P * S * A;
	D = trans(Id - C - (Id-P)*S*B*C);

	// COUT(A);
	// COUT(inv(D)*r);

	n = solve(D,r);			// newshell
	o = n*((Id-P)*S*B);		// oldshell

}	
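A minimal calling sketch (not part of the source); it assumes an ADMB context with placeholder inputs, since in GMACS the size-transition, survival, and molting matrices and the recruitment vector are filled elsewhere in the model:

  int nclass = 5;
  dvar_vector n(1,nclass), o(1,nclass), rec(1,nclass);
  dvar_matrix A(1,nclass,1,nclass), S(1,nclass,1,nclass), P(1,nclass,1,nclass);
  // ... fill A with size-transition probabilities, S and P with diagonal
  //     survival and molting probabilities, and rec with recruits at length ...
  calc_equilibrium(n, o, A, S, P, rec);  // n and o now hold equilibrium numbers at length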
Example #4
/**
 * GHK simulation of the probability that a multivariate normal vector lies
 * between lower and upper, given a Choleski factor of its covariance matrix.
 * \param lower vector of lower bounds
 * \param upper vector of upper bounds
 * \param ch Choleski factor of the covariance matrix
 * \param eps matrix of uniform deviates; each of its m rows is one simulation replicate
 * \return the simulated probability, averaged over the m replicates
 */
dvariable ghk_choleski(const dvar_vector& lower,const dvar_vector& upper,
  const dvar_matrix& ch, const dmatrix& eps)
{
  RETURN_ARRAYS_INCREMENT();
  int n=lower.indexmax();
  int m=eps.indexmax();
  dvariable ssum=0.0;
  dvar_vector l(1,n);
  dvar_vector u(1,n);

  for (int k=1;k<=m;k++)
  {
    dvariable weight=1.0;
    l=lower;
    u=upper;
    for (int j=1;j<=n;j++)
    {
      l(j)/=ch(j,j);
      u(j)/=ch(j,j);
      dvariable Phiu=cumd_norm(u(j));
      dvariable Phil=cumd_norm(l(j));
      weight*=Phiu-Phil;
      dvariable eta=inv_cumd_norm((Phiu-Phil)*eps(k,j)+Phil);
      for (int i=j+1;i<=n;i++)
      {
        dvariable tmp=ch(i,j)*eta;
        l(i)-=tmp;
        u(i)-=tmp;
      }
    }
    ssum+=weight;
  }
  RETURN_ARRAYS_DECREMENT();
  return ssum/m;
}
Example #5
/**
 * Subtract a constant from each element of a dvar_vector.
 * \param t1 a dvar_vector
 * \param x the constant to subtract
 * \return a dvar_vector whose i-th element is t1(i) - x
 */
dvar_vector operator-(const dvar_vector& t1, const double x)
  {
    RETURN_ARRAYS_INCREMENT();
    dvar_vector tmp(t1.indexmin(),t1.indexmax());
    save_identifier_string("ucbb");
    for (int i=t1.indexmin(); i<=t1.indexmax(); i++)
    {
      tmp.elem_value(i)=t1.elem_value(i)-x;
    }
    tmp.save_dvar_vector_position();
    t1.save_dvar_vector_position();
    save_identifier_string("dduu");
    RETURN_ARRAYS_DECREMENT();
    gradient_structure::GRAD_STACK1->set_gradient_stack(DF_dv_cdble_diff);
    return(tmp);
  }
Example #6
/**
 * GHK simulation of the probability that a multivariate normal vector with
 * covariance Sigma lies below the upper bounds (the lower bounds are
 * effectively minus infinity); the Choleski factor of Sigma is computed internally.
 * \param upper vector of upper bounds
 * \param Sigma covariance matrix
 * \param eps matrix of uniform deviates; each of its m rows is one simulation replicate
 * \return the simulated probability, averaged over the m replicates
 */
dvariable ghk_m(const dvar_vector& upper,const dvar_matrix& Sigma,
  const dmatrix& eps)
{
  RETURN_ARRAYS_INCREMENT();
  int n=upper.indexmax();
  int m=eps.indexmax();
  dvariable ssum=0.0;
  dvar_vector u(1,n);
  dvar_matrix ch=choleski_decomp(Sigma);

  for (int k=1;k<=m;k++)
  {
    dvariable weight=1.0;
    u=upper;
    for (int j=1;j<=n;j++)
    {
      u(j)/=ch(j,j);
      dvariable Phiu=cumd_norm(u(j));
      weight*=Phiu;
      dvariable eta=inv_cumd_norm(1.e-30+Phiu*eps(k,j));
      for (int i=j+1;i<=n;i++)
      {
        dvariable tmp=ch(i,j)*eta;
        u(i)-=tmp;
      }
    }
    ssum+=weight;
  }
  RETURN_ARRAYS_DECREMENT();
  return ssum/m;
}
Example #7
/**
 * GHK simulation weight for a single replicate of the probability that a
 * multivariate normal vector with covariance Sigma lies between lower and upper.
 * \param lower vector of lower bounds
 * \param upper vector of upper bounds
 * \param Sigma covariance matrix
 * \param eps matrix of uniform deviates
 * \param _i index of the replicate (row of eps) to use
 * \return the simulated weight for replicate _i
 */
dvariable ghk(const dvar_vector& lower,const dvar_vector& upper,
  const dvar_matrix& Sigma, const dmatrix& eps,int _i)
{
  RETURN_ARRAYS_INCREMENT();
  int n=lower.indexmax();
  dvar_matrix ch=choleski_decomp(Sigma);
  dvar_vector l(1,n);
  dvar_vector u(1,n);

  ghk_test(eps,_i);

  dvariable weight=1.0;
  int k=_i;
  {
    l=lower;
    u=upper;
    for (int j=1;j<=n;j++)
    {
      l(j)/=ch(j,j);
      u(j)/=ch(j,j);
      dvariable Phiu=cumd_norm(u(j));
      dvariable Phil=cumd_norm(l(j));
      weight*=Phiu-Phil;
      dvariable eta=inv_cumd_norm((Phiu-Phil)*eps(k,j)+Phil+1.e-30);
      for (int i=j+1;i<=n;i++)
      {
        dvariable tmp=ch(i,j)*eta;
        l(i)-=tmp;
        u(i)-=tmp;
      }
    }
  }
  RETURN_ARRAYS_DECREMENT();
  return weight;
}
Example #8
/**
 * Add a variable scalar to each element of a dvar_vector.
 * \param x a prevariable
 * \param t1 a dvar_vector
 * \return a dvar_vector whose i-th element is x + t1(i)
 */
dvar_vector operator+(const prevariable& x, const dvar_vector& t1)
  {
    RETURN_ARRAYS_INCREMENT();
    dvar_vector tmp(t1.indexmin(),t1.indexmax());
    save_identifier_string("wcbf");
    x.save_prevariable_position();
    for (int i=t1.indexmin(); i<=t1.indexmax(); i++)
    {
      tmp.elem_value(i)=t1.elem_value(i)+value(x);
    }
    tmp.save_dvar_vector_position();
    t1.save_dvar_vector_position();
    save_identifier_string("dduu");
    RETURN_ARRAYS_DECREMENT();
    gradient_structure::GRAD_STACK1->set_gradient_stack(DF_dble_dv_add);
    return(tmp);
  }
Example #9
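/**
 * Elementwise posfun: returns x(i) unchanged when x(i) >= eps, and otherwise a
 * smooth positive substitute eps/(2 - x(i)/eps) while adding a quadratic
 * penalty to pen.
 * \param x a dvar_vector
 * \param eps threshold below which values are smoothly constrained
 * \param pen penalty accumulator
 * \return a dvar_vector of constrained values
 */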
dvar_vector posfun(const dvar_vector& x, const double& eps, dvariable& pen)
  {
    int i;
    dvar_vector xp(x.indexmin(),x.indexmax());
    for(i=x.indexmin();i<=x.indexmax();i++)
    {
        if(x(i)>=eps)
        {
            xp(i) = x(i);
        }
        else
        {
            pen += 0.01*square(x(i)-eps);
            xp(i) = eps/(2.-x(i)/eps);
        }
    }
    return(xp);
  }
Example #10
/**
 * Elementwise inverse cumulative standard normal distribution function.
 * \param x a dvar_vector of probabilities
 * \return a dvar_vector whose i-th element is inv_cumd_norm(x(i))
 */
dvar_vector inv_cumd_norm(const dvar_vector& x)
{
  int mmin=x.indexmin();
  int mmax=x.indexmax();
  dvar_vector tmp(mmin,mmax);
  for (int i=mmin;i<=mmax;i++)
  {
    tmp(i)=inv_cumd_norm(x(i));
  }
  return tmp;
}
Example #11
/**
 * Elementwise posfun: returns x(i) unchanged when x(i) >= eps and a smooth
 * positive substitute otherwise, adding a penalty to _pen.
 * \param x a dvar_vector
 * \param eps threshold below which values are smoothly constrained
 * \param _pen penalty accumulator
 * \return a dvar_vector whose i-th element is posfun(x(i), eps, _pen)
 */
dvar_vector posfun(const dvar_vector&x,double eps,const prevariable& _pen)
{
  int mmin=x.indexmin();
  int mmax=x.indexmax();
  dvar_vector tmp(mmin,mmax);
  for (int i=mmin;i<=mmax;i++)
  {
    tmp(i)=posfun(x(i),eps,_pen);
  }
  return tmp;
}
Example #12
void set_value_inv_mc(const dvar_vector& x, const dvector& _v, const int& _ii,
  const double fmin, const double fmax)
{
  dvector& v=(dvector&) _v;
  int& ii=(int&) _ii;
  int min=x.indexmin();
  int max=x.indexmax();
  for (int i=min;i<=max;i++)
  {
    v(ii++)=set_value_inv_mc(x(i),fmin,fmax);
  }
}
Example #13
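/**
 * Robust regression negative log-likelihood: the residuals obs - pred are
 * modelled as a mixture of a normal density and a fat-tailed component so
 * that outliers have limited influence on the fit.
 * \param obs vector of observed values
 * \param pred dvar_vector of predicted values
 * \param cutoff scaling factor a applied to the estimated residual standard deviation
 * \return the negative log-likelihood contribution (a dvariable)
 */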
dvariable robust_regression(dvector& obs, dvar_vector& pred, 
  const double& cutoff) 
{
  if (obs.indexmin() != pred.indexmin() || obs.indexmax() != pred.indexmax() )
  {
    cerr << "Index limits on observed vector are not equal to the Index\n\
 limits on the predicted vector in robust_reg_likelihood function\n";
  }
  RETURN_ARRAYS_INCREMENT(); //Need this statement because the function
			     //returns a variable type
  int min=obs.indexmin();
  int max=obs.indexmax();
  dvariable sigma_hat;
  dvariable sigma_tilde; 
  int nobs = max-min+1;
  double width=3.0;
  double pcon=0.05;
  double width2=width*width;
  dvariable zpen;
  zpen=0.0;
  double a,a2;
  a=cutoff; 
     // This bounds a between 0.05 and 1.75
  a2=a*a;
  dvariable tmp,tmp2,tmp4,sum_square,v_hat;
  dvar_vector diff_vec = obs-pred;
  tmp = norm(diff_vec);
  sum_square = tmp * tmp;
  v_hat = 1.e-80 + sum_square/nobs;
  sigma_hat=pow(v_hat,.5);
  sigma_tilde=a*sigma_hat;
  double b=2.*pcon/(width*sqrt(PI));
  dvariable log_likelihood;
  dvariable div1;
  dvariable div2,div4;
  div1 = 2*(a2*v_hat);
  div2 = width2*(a2*v_hat);
  div4 = div2*div2;
  log_likelihood = 0;
  for (int i=min; i<=max; i++)
  {
    tmp=diff_vec[i];
    tmp2=tmp*tmp;
    tmp4=tmp2*tmp2;
    log_likelihood -= log((1-pcon)*exp(-tmp2/div1)+b/(1.+tmp4/div4) );
  }
  log_likelihood += nobs*log(a2*v_hat)/2.;
  log_likelihood += zpen;
  RETURN_ARRAYS_DECREMENT(); // Need this to decrement the stack increment
			     // caused by RETURN_ARRAYS_INCREMENT();
  return(log_likelihood);  
}
Example #14
/**
 * @ingroup GMACS
 * @brief Calculate equilibrium vector n given A, S and r
 * @details Solves a matrix equation for the equilibrium number
 * of crabs in each length interval.
 *
 * @param[out] n vector of numbers at length
 * @param[in] A size transition matrix
 * @param[in] S diagonal matrix of length specific survival rates
 * @param[in] r vector of new recruits at length.
 */
void calc_equilibrium(dvar_vector& n,
                      const dvar_matrix& A,
                      const dvar_matrix& S,
                      const dvar_vector r)
{
	int nclass = n.indexmax();
	dmatrix Id = identity_matrix(1,nclass);
	dvar_matrix At(1,nclass,1,nclass);

	At = trans(A*S);
	n  = -solve(At-Id,r);

}
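A short unpacking of the algebra behind the last two lines (it only restates what the code computes): solve(At - Id, r) returns the vector x satisfying (At - Id)x = r, so n = -solve(At - Id, r) is the solution of (Id - At)n = r, i.e.

  n = At*n + r,   with At = trans(A*S),

which is the equilibrium condition that survivors after growth, trans(A*S)*n, plus the new recruits r reproduce the numbers-at-length vector n.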
Example #15
/**
 * Register the elements of t as the independent variables of the gradient
 * structure; exits with an error if there are more elements than
 * MAX_NVAR_OFFSET allows.
 * \param t a dvar_vector of independent variables
 */
void make_indvar_list(const dvar_vector& t)
{
  if (!gradient_structure::instances)
  {
    return;
  }
  if ((unsigned int)(t.indexmax()-t.indexmin()+1)
    > gradient_structure::MAX_NVAR_OFFSET)
  {
   if (ad_printf)
   {
     (*ad_printf)("Current maximum number of independent variables is %d\n",
        gradient_structure::MAX_NVAR_OFFSET);
     (*ad_printf)("  You need to increase the global variable "
     "MAX_NVAR_OFFSET to %d\n",t.indexmax()-t.indexmin()+1);
     (*ad_printf)("  This can be done by putting the line\n"
         "    gradient_structure::set_MAX_NVAR_OFFSET(%d);\n",
        t.indexmax()-t.indexmin()+1);
     (*ad_printf)("  before the declaration of the gradient_structure object.\n"
        " or the command line option -mno %d\n",
        t.indexmax()-t.indexmin()+1);
   }
   else
   {
     cerr << "Current maximum number of independent variables is "
          << gradient_structure::MAX_NVAR_OFFSET << "\n"
          <<  "  You need to increase the global variable MAX_NVAR_OFFSET to "
          << (t.indexmax()-t.indexmin()+1) << "\n"
          << "  This can be done by putting the line\n"
          << "    'gradient_structure::set_MAX_NVAR_OFFSET("
          << (t.indexmax()-t.indexmin()+1) << ");'\n"
          << "  before the declaration of the gradient_structure object.\n"
          << " or use the -mno 1149 command line option in AD Model Builder\n";
   }
   ad_exit(21);
  }

  for (int i=t.indexmin(); i<=t.indexmax(); i++)
  {
    unsigned int tmp = (unsigned int)(i - t.indexmin());
    gradient_structure::INDVAR_LIST->put_address(tmp,&(t.va[i].x));
  }
  gradient_structure::NVAR=t.indexmax()-t.indexmin()+1;
}
Example #16
/**
Allocate dvar_vector using the index bounds of v1.
*/
void dvar_vector::allocate(const dvar_vector& v1)
{
  allocate(v1.indexmin(), v1.indexmax());
}
Example #17
/**
Copy the index bounds and the pointer to values from v.

\param v a dvar_vector
*/
dvar_vector_position::dvar_vector_position(const dvar_vector& v)
{
  min=v.indexmin();
  max=v.indexmax();
  va=v.get_va();
}