Example #1
File: main.cpp Project: regmi/hermes2d
Scalar F_cranic(int n, double *wt, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* titer = ext->fn[0];
  Func<Scalar>* tprev = ext->fn[1];
  for (int i = 0; i < n; i++)
    result += wt[i] * (HEATCAP * (titer->val[i] - tprev->val[i]) * v->val[i] / TAU +
                       0.5 * lam(titer->val[i]) * (titer->dx[i] * v->dx[i] + titer->dy[i] * v->dy[i]) +
                       0.5 * lam(tprev->val[i]) * (tprev->dx[i] * v->dx[i] + tprev->dy[i] * v->dy[i]));
  return result;
}
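Throughout the Hermes2D examples on this page, `lam`, `dlam_dT`/`dlam_du`, `heat_src`, `HEATCAP` and `TAU` are problem-specific symbols defined elsewhere in each project; `lam` is the solution-dependent diffusion (conductivity) coefficient. A minimal, purely hypothetical sketch of such definitions, only to make the weak forms readable on their own:

// Hypothetical problem data -- the real projects define their own versions.
const double HEATCAP = 1.0;   // assumed heat capacity
const double TAU     = 0.5;   // assumed time step length

// Assumed model of the solution-dependent conductivity and its derivative
// (some of the examples name the derivative dlam_du instead of dlam_dT).
template<typename Real>
Real lam(Real u) { return 1.0 + u * u; }

template<typename Real>
Real dlam_dT(Real u) { return 2.0 * u; }

// Assumed prescribed heat source; the time-dependent examples pass an extra time argument.
double heat_src(double x, double y) { return 1.0; }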
Example #2
Scalar F_cranic(int n, double *wt, Func<Real> *u_ext[], Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* u_prev_newton = u_ext[0];
  Func<Scalar>* u_prev_time = ext->fn[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * ((u_prev_newton->val[i] - u_prev_time->val[i]) * v->val[i] / TAU
                       + 0.5 * lam(u_prev_newton->val[i]) * (u_prev_newton->dx[i] * v->dx[i] + u_prev_newton->dy[i] * v->dy[i])
                       + 0.5 * lam(u_prev_time->val[i]) * (u_prev_time->dx[i] * v->dx[i] + u_prev_time->dy[i] * v->dy[i])
                       - heat_src(e->x[i], e->y[i]) * v->val[i]);
  return result;
}
Example #3
File: LLL.c Project: Macaulay2/Singular
static
long image(ZZ& det, mat_ZZ& B, mat_ZZ* U, long verbose)
{
   long m = B.NumRows();
   long n = B.NumCols();

   long force_reduce = 1;

   vec_long P;
   P.SetLength(m);

   vec_ZZ D;
   D.SetLength(m+1);
   D[0] = 1;

   vec_vec_ZZ lam;

   lam.SetLength(m);

   long j;
   for (j = 1; j <= m; j++)
      lam(j).SetLength(m);

   if (U) ident(*U, m);

   long s = 0;

   long k = 1;
   long max_k = 0;


   while (k <= m) {
      if (k > max_k) {
         IncrementalGS(B, P, D, lam, s, k);
         max_k = k;
      }

      if (k == 1) {
         force_reduce = 1;
         k++;
         continue;
      }

      if (force_reduce)
         for (j = k-1; j >= 1; j--)
            reduce(k, j, B, P, D, lam, U);

      if (P(k-1) != 0 && P(k) == 0) {
         force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);
         k--;
      }
      else {
         force_reduce = 1;
         k++;
      }
   }

   det = D[s];
   return s;
}
Example #4
TEST(Interface, Simple){

    linear_ip::Vector c(5), b(3);
    linear_ip::Matrix A(3, 5);

    b << 6, 12, 10;
    c << 2, 1, 0, 0, 0;
    A << 1, 1, -1, 0, 0,
        1, 4, 0, -1, 0,
        1, 2, 0, 0, 1;

    linear_ip::lp P(c, A, b);

    P.solve();

    linear_ip::Vector x(5), s(5), lam(3);
    
    x << 2, 4, 0, 6, 0;
    s << 0, 0, 3, 0, 1;
    lam << 3, 0, -1;

    EXPECT_TRUE(x.isApprox(P.get_x(), 1e-5));
    EXPECT_TRUE(s.isApprox(P.get_s(), 1e-5));
    EXPECT_TRUE(lam.isApprox(P.get_lam(), 1e-5));

}
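Reading the data off the test above (and assuming the hypothetical `linear_ip::lp` interface solves a standard-form problem), the expected values are consistent with the linear program

$$\min_{x \ge 0} \; c^\top x \quad \text{s.t.} \quad A x = b,$$

for which the checked triple satisfies the optimality conditions: $Ax = b$, the reduced costs $s = c - A^\top \lambda = (0,0,3,0,1)^\top$ are nonnegative, and $x_i s_i = 0$ for every component.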
Example #5
File: pr82878.C Project: MaxKellermann/gcc
void
bar (A b)
{
  void (*lam) (A) = [](A) { baz (); };

  if (auto c = b)
    lam (c);
}
Example #6
File: main.cpp Project: Zhonghua/hermes2d
Scalar res(int n, double *wt, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
    Scalar result = 0;
    Func<Scalar>* u_prev = ext->fn[0];
    for (int i = 0; i < n; i++)
        result += wt[i] * (lam(u_prev->val[i]) * (u_prev->dx[i] * v->dx[i] + u_prev->dy[i] * v->dy[i])
                           - heat_src(e->x[i], e->y[i]) * v->val[i]);
    return result;
}
Example #7
File: forms.cpp Project: alieed/hermes
Scalar bilinear_form(int n, double *wt, Func<Real> *u_ext[], Func<Real> *u, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* u_prev = ext->fn[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * (lam(u_prev->val[i]) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i]));

  return result;
}
Example #8
Scalar res_ss(int n, double *wt, Func<Real> *u_ext[], Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext, double t)
{
  Scalar result = 0;
  Func<Scalar>* Y_prev_newton = u_ext[0];
  for (int i = 0; i < n; i++) {
    result += wt[i] * (lam(Y_prev_newton->val[i]) * (Y_prev_newton->dx[i] * v->dx[i] + Y_prev_newton->dy[i] * v->dy[i])
                       - heat_src(e->x[i], e->y[i], t) * v->val[i]);
  }
  return result;
}
Example #9
File: main.cpp Project: Zhonghua/hermes2d
Scalar jac(int n, double *wt, Func<Real> *u, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
    Scalar result = 0;
    Func<Scalar>* u_prev = ext->fn[0];
    for (int i = 0; i < n; i++)
        result += wt[i] * (dlam_du(u_prev->val[i]) * u->val[i] * (u_prev->dx[i] * v->dx[i] + u_prev->dy[i] * v->dy[i])
                           + lam(u_prev->val[i]) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i]));

    return result;
}
Example #10
File: main.cpp Project: regmi/hermes2d
Scalar J_euler(int n, double *wt, Func<Real> *u, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* titer = ext->fn[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * (HEATCAP * u->val[i] * v->val[i] / TAU +
                       dlam_dT(titer->val[i]) * u->val[i] * (titer->dx[i] * v->dx[i] + titer->dy[i] * v->dy[i]) +
                       lam(titer->val[i]) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i]));
  return result;
}
Example #11
Scalar J_cranic(int n, double *wt, Func<Real> *u_ext[], Func<Real> *u, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* u_prev_newton = u_ext[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * (u->val[i] * v->val[i] / TAU +
                       0.5 * dlam_du(u_prev_newton->val[i]) * u->val[i] * (u_prev_newton->dx[i] * v->dx[i] + u_prev_newton->dy[i] * v->dy[i])
                       + 0.5 * lam(u_prev_newton->val[i]) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i]));
  return result;
}
Example #12
Scalar jac_sdirk(int n, double *wt, Func<Real> *u_ext[], Func<Real> *u, Func<Real> *v, 
                         Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* u_prev_newton = u_ext[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * (u->val[i] * v->val[i] / TAU
                       + BUTCHER_A_11 * (dlam_du(u_prev_newton->val[i]) * u->val[i] 
                                  * (u_prev_newton->dx[i] * v->dx[i] + u_prev_newton->dy[i] * v->dy[i])
                                  + lam(u_prev_newton->val[i]) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i])));
  return result;
}
Example #13
Scalar res1(int n, double *wt, Func<Real> *u_ext[], Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* Y1_prev_newton = u_ext[0];
  Func<Scalar>* u_prev_time = ext->fn[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * ((Y1_prev_newton->val[i] - u_prev_time->val[i]) * v->val[i] 
                       + GAMMA * TAU * (lam(Y1_prev_newton->val[i]) * (Y1_prev_newton->dx[i] * v->dx[i] 
                                                                       + Y1_prev_newton->dy[i] * v->dy[i])
                                        - heat_src(e->x[i], e->y[i], TIME+GAMMA*TAU) * v->val[i]));
  return result;
}
Example #14
Scalar jac2(int n, double *wt, Func<Real> *u_ext[], Func<Real> *u, Func<Real> *v, Geom<Real> *e, ExtData<Scalar> *ext)
{
  Scalar result = 0;
  Func<Scalar>* Y2_prev_newton = u_ext[0];
  for (int i = 0; i < n; i++)
    result += wt[i] * (u->val[i] * v->val[i]
                       + GAMMA * TAU * (dlam_du(Y2_prev_newton->val[i]) * u->val[i] * (Y2_prev_newton->dx[i] * v->dx[i]
                                                                                       + Y2_prev_newton->dy[i] * v->dy[i])
                                        + lam(Y2_prev_newton->val[i]) * (u->dx[i] * v->dx[i]
                                                                         + u->dy[i] * v->dy[i])));
  return result;
}
Example #15
void ProjectToEdge (const Surface * f1, const Surface * f2, Point<3> & hp)
{
  Vec<2> rs, lam;
  Vec<3> a1, a2;
  Mat<2> a;

  int i = 10;
  while (i > 0)
    {
      i--;
      rs(0) = f1 -> CalcFunctionValue (hp);
      rs(1) = f2 -> CalcFunctionValue (hp);
      f1->CalcGradient (hp, a1);
      f2->CalcGradient (hp, a2);

      double alpha = fabs(a1*a2)/sqrt(a1.Length2()*a2.Length2());
      if(fabs(1.-alpha) < 1e-6)
	{
	  if(fabs(rs(0)) >= fabs(rs(1)))
	    f1 -> Project(hp);
	  else
	    f2 -> Project(hp);
	}
      else
	{

	  a(0,0) = a1 * a1;
	  a(0,1) = a(1,0) = a1 * a2;
	  a(1,1) = a2 * a2;
	  
	  a.Solve (rs, lam);

	  hp -= lam(0) * a1 + lam(1) * a2;
	}

      if (Abs2 (rs) < 1e-24 && i > 1) i = 1;
    }
}
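The loop above is a Newton-type projection of `hp` onto the intersection curve of the two implicit surfaces: with residuals $r = (f_1(p), f_2(p))^\top$ and gradients $a_1 = \nabla f_1(p)$, $a_2 = \nabla f_2(p)$, each iteration solves the $2\times2$ Gram system

$$\begin{pmatrix} a_1\!\cdot\!a_1 & a_1\!\cdot\!a_2 \\ a_1\!\cdot\!a_2 & a_2\!\cdot\!a_2 \end{pmatrix}\begin{pmatrix} \lambda_0 \\ \lambda_1 \end{pmatrix} = \begin{pmatrix} f_1(p) \\ f_2(p) \end{pmatrix}, \qquad p \leftarrow p - \lambda_0 a_1 - \lambda_1 a_2.$$

When the gradients are nearly parallel ($\alpha \approx 1$) this system is ill-conditioned, so the code instead projects onto whichever single surface has the larger residual.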
Example #16
File: forms.cpp Project: alieed/hermes
Scalar stac_residual(int n, double *wt, Func<Real> *u_ext[], Func<Real> *v, 
                     Geom<Real> *e, ExtData<Scalar> *ext)
{
  Func<Scalar>* u_prev = u_ext[0];

  // This is a temporary workaround. The stage time t_n + h * c_i
  // can be accessed via u_stage_time->val[0];
  // In this particular case the stage time is not needed as 
  // the form does not depend explicitly on time.
  Func<Scalar>* u_stage_time = ext->fn[0]; 

  // Stationary part of the residual (time derivative term left out).
  Scalar result1 = 0, result2 = 0;
  for (int i = 0; i < n; i++) {
    result1 = result1 - wt[i] * lam(u_prev->val[i]) * (u_prev->dx[i] * v->dx[i] + u_prev->dy[i] * v->dy[i]);
    result2 = result2 + wt[i] * heat_src(e->x[i], e->y[i]) * v->val[i];
  }

  return result1 + result2;
}
Example #17
Scalar stac_jacobian(int n, double *wt, Func<Real> *u_ext[], Func<Real> *u, Func<Real> *v, 
                     Geom<Real> *e, ExtData<Scalar> *ext)
{
  Func<Scalar>* K_sln = u_ext[0];
  Func<Scalar>* sln_prev_time = ext->fn[0];

  // This is a temporary workaround. The stage time t_n + h * c_i
  // can be accessed via u_stage_time->val[0];
  // In this particular case the stage time is not needed as 
  // the form does not depend explicitly on time.
  //Func<Scalar>* u_stage_time = ext->fn[1]; 

  // Stationary part of the Jacobian matrix (time derivative term left out).
  Scalar result1 = 0, result2 = 0;
  for (int i = 0; i < n; i++) {
    Scalar sln_val_i = sln_prev_time->val[i] + K_sln->val[i];
    Scalar sln_dx_i = sln_prev_time->dx[i] + K_sln->dx[i];
    Scalar sln_dy_i = sln_prev_time->dy[i] + K_sln->dy[i];
    result1 += -wt[i] * dlam_du(sln_val_i) * u->val[i] * (sln_dx_i * v->dx[i] + sln_dy_i * v->dy[i]);
    result2 += -wt[i] * lam(sln_val_i) * (u->dx[i] * v->dx[i] + u->dy[i] * v->dy[i]);
  }

  return result1 + result2;
}
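In the stage Jacobian above the unknown is the Runge-Kutta stage `K_sln`, so the nonlinear coefficients are evaluated at the reconstructed stage solution

$$u_i = u^{\mathrm{prev}}_i + K_i,$$

while the basis and test functions `u`, `v` enter linearly; any scaling of the stage values and their combination into the time step is handled elsewhere in the project.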
Example #18
 double ExponentialModel::sim(RNG &rng) const { return rexp_mt(rng, lam()); }
Example #19
File: PoissonModel.cpp Project: cran/Boom
 double PoissonModel::logp(int x) const { return dpois(x, lam(), true); }
Example #20
File: PoissonModel.cpp Project: cran/Boom
 double PoissonModel::simdat(RNG &rng) const { return rpois_mt(rng, lam()); }
Example #21
 double PoissonModel::pdf(uint x, bool logscale) const{
   return dpois(x, lam(), logscale); }
Example #22
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])  
{ 	
	register int i;
	float x;
	int max_len = 6;
	int p;
	int ns0 = 29;
	static area_function *af0;

	if (CheckArguments(nlhs, plhs, nrhs, prhs) ){
		mexErrMsgTxt("AMgetdata argument checking failed.");
		return ;
	}

	/* read nasal tract area function from the file */
	//printf("Reading nasal tract file.\n");
	read_af(NTAFpath , &nna, &afnt );
	//printf("Finished Reading nasal tract file.\n");

	/* Initialization */
	//read_rad();


	// update all constants and variables
	if((int)(pTCcfg[0]) == RL_CIRCUIT)	rad_boundary = RL_CIRCUIT;
	else	rad_boundary = SHORT_CIRCUIT;
	//printf("Rad_boundary[%d]\n",rad_boundary);
	if((int)(pTCcfg[1]) == YIELDING)	wall = YIELDING;
	else	wall = RIGID;
	//printf("wall[%d]\n",wall);
	if((int)(pTCcfg[2]) == ON)	nasal_tract = ON;
	else	nasal_tract = OFF;
	//printf("nasal_tract[%d]\n",nasal_tract);
	if((int)(pTCcfg[3]) == CLOSE)	glt_boundary = CLOSE;
	else	glt_boundary = OPEN;
	//printf("glt_boundary[%d]\n",glt_boundary);

	ro = (float)(pPCcfg[0]);
	//printf("Air density[%f]\n",ro);
	c = (float)(pPCcfg[1]);
	//printf("Sound velocity[%f]\n",c);
	wall_resi = (float)(pPCcfg[2]);
	//printf("wall_resi[%f]\n",wall_resi);
	wall_mass = (float)(pPCcfg[3]);
	//printf("wall_mass[%f]\n",wall_mass);
	wall_comp = (float)(pPCcfg[4]);
	//printf("wall_comp[%f]\n",wall_comp);

	for(i=0;i<7;i++)
	{
		AMpar[i] = (float)(pAMcfg[i]);
		//printf("AMpar[%d][%f]\n",i,AMpar[i]);
	}
	anc = (float)(pAMcfg[7]);
	//printf("nasal area[%f]\n",anc);


	/* Initialization */
	read_model_spec();
	af0 = (area_function *) calloc( ns0, sizeof(area_function) );
	nph = 9;
	nbu = 8;
	nss = nbu + nph;
	afvt  = (area_function *) calloc( nss, sizeof(area_function) );
	convert_scale();
	semi_polar();

	
	oAf = mxCreateDoubleMatrix(2,nss, mxREAL);
	pAf = mxGetPr(oAf);

	/* Compute VT profile and area function, and plot them */
	lam( AMpar );				/* profile	*/
	
	sagittal_to_area( &ns0, af0 );		/* area function */
	for(i=0;i<ns0;i++)
	{
		//printf("AreaNS0[%d][%f]\n",i,af0[i]);
	}
	appro_area_function( ns0, af0, nss, afvt);
	for(i=0;i<nss;i++)
	{
		//printf("AreaAF[%d][%f]\n",i,afvt[i]);
		pAf[2*i] = afvt[i].A;
		pAf[2*i+1] = afvt[i].x;
	}
	if( nasal_tract == ON )
	{  
		anc = (float) min( anc, afvt[nph].A );
		afvt[nph].A -= anc;
		pAf[2*nph] = afvt[nph].A;
		pAf[2*nph+1] = afvt[nph].x;
	}

	calplot_tf_FBA(nfrmmax, frm, bw, amp, &nfrms, tfunc, &ntf);

	//printf("Finished calculations.\n");

	oPdat1 = mxCreateDoubleMatrix(4,NP, mxREAL);
	pPdat1 = mxGetPr(oPdat1);
	for(i=0;i<NP;i++)
	{
		pPdat1[4*i] = ivt[i].x;
		pPdat1[4*i+1] = ivt[i].y;
		pPdat1[4*i+2] = evt[i].x;
		pPdat1[4*i+3] = evt[i].y;
	}
	oPdat2 = mxCreateDoubleMatrix(1,2,mxREAL);
	pPdat2 = mxGetPr(oPdat2);
	pPdat2[0] = lip_w;
	pPdat2[1] = lip_h;


	oTf = mxCreateDoubleMatrix(ntf, 1, mxREAL);
	pTf = mxGetPr(oTf);
	for(i=0;i<ntf;i++)
	{
		pTf[i] = tfunc[i];
	}

	oFmt = mxCreateDoubleMatrix(nfrms, 1, mxREAL);
	pFmt = mxGetPr(oFmt);
	oBw = mxCreateDoubleMatrix(nfrms, 1, mxREAL);
	pBw = mxGetPr(oBw);
	oAmp = mxCreateDoubleMatrix(nfrms, 1, mxREAL);
	pAmp = mxGetPr(oAmp);

	for(i=0;i<nfrms;i++)
	{
		pFmt[i] = frm[i];
		pBw[i] = bw[i];
		pAmp[i] = amp[i];
	}

	/* Assign output pointers */
	plhs[0] = oAf; 
	plhs[1] = oTf; 
	plhs[2] = oFmt; 
	plhs[3] = oBw; 
	plhs[4] = oAmp; 
	plhs[5] = oPdat1; 
	plhs[6] = oPdat2; 

	// do cleanup
	//free( rad_re );
	//free( rad_im );
	free( afnt );
	free( af0 );
	free( afvt );
}
Example #23
bool Mesh::interpolateElementCentered(uint elementIndex, double x, double y, double* value, const ElementOutput* output, const ValueAccessor* accessor) const
{
  const Mesh* mesh = output->dataSet->mesh();
  const Element& elem = mesh->elements()[elementIndex];

  if (elem.eType() == Element::ENP)
    {
      const Node* nodes = projectedNodes();
      QPolygonF pX(elem.nodeCount());
      QVector<double> lam(elem.nodeCount());

      for (int i=0; i<elem.nodeCount(); i++) {
          pX[i] = nodes[elem.p(i)].toPointF();
      }

      if (!ENP_physicalToLogical(pX, QPointF(x, y), lam))
        return false;

      *value = accessor->value(elementIndex);
      return true;
    }
  else if (elem.eType() == Element::E4Q)
  {
    int e4qIndex = mE4QtmpIndex[elementIndex];
    E4Qtmp& e4q = mE4Qtmp[e4qIndex];

    double Lx, Ly;
    if (!E4Q_mapPhysicalToLogical(e4q, x, y, Lx, Ly, *mE4Qnorm))
      return false;

    if (Lx < 0 || Ly < 0 || Lx > 1 || Ly > 1)
      return false;

    *value = accessor->value(elementIndex);
    return true;
  }
  else if (elem.eType() == Element::E3T)
  {
    const Node* nodes = projectedNodes();

    double lam1, lam2, lam3;
    if (!E3T_physicalToBarycentric(nodes[elem.p(0)].toPointF(),
                                   nodes[elem.p(1)].toPointF(),
                                   nodes[elem.p(2)].toPointF(),
                                   QPointF(x, y),
                                   lam1, lam2, lam3))
      return false;

    // Now interpolate

    *value = accessor->value(elementIndex);
    return true;

  }
  else if (elem.eType() == Element::E2L)
  {
    const Node* nodes = projectedNodes();

    double lam;
    if (!E2L_physicalToLogical(nodes[elem.p(0)].toPointF(),
                               nodes[elem.p(1)].toPointF(),
                               QPointF(x, y),
                               lam))
      return false;


    // Now interpolate
    *value = accessor->value(elementIndex);
    return true;

  }
  else
  {
    Q_ASSERT(0 && "unknown element type");
    return false;
  }

}
Example #24
bool Mesh::interpolate(uint elementIndex, double x, double y, double* value, const NodeOutput* output, const ValueAccessor* accessor) const
{
  const Mesh* mesh = output->dataSet->mesh();
  const Element& elem = mesh->elements()[elementIndex];

  if (elem.eType() == Element::ENP)
  {
    const Node* nodes = projectedNodes();
    QPolygonF pX(elem.nodeCount());
    QVector<double> lam(elem.nodeCount());

    for (int i=0; i<elem.nodeCount(); i++) {
        pX[i] = nodes[elem.p(i)].toPointF();
    }

    if (!ENP_physicalToLogical(pX, QPointF(x, y), lam))
      return false;

    *value = 0;
    for (int i=0; i<elem.nodeCount(); i++) {
        *value += lam[i] * accessor->value( elem.p(i) );
    }

    return true;
  }
  else if (elem.eType() == Element::E4Q)
  {
    int e4qIndex = mE4QtmpIndex[elementIndex];
    E4Qtmp& e4q = mE4Qtmp[e4qIndex];

    double Lx, Ly;
    if (!E4Q_mapPhysicalToLogical(e4q, x, y, Lx, Ly, *mE4Qnorm))
      return false;

    if (Lx < 0 || Ly < 0 || Lx > 1 || Ly > 1)
      return false;

    double q11 = accessor->value( elem.p(2) );
    double q12 = accessor->value( elem.p(1) );
    double q21 = accessor->value( elem.p(3) );
    double q22 = accessor->value( elem.p(0) );

    *value = q11*Lx*Ly + q21*(1-Lx)*Ly + q12*Lx*(1-Ly) + q22*(1-Lx)*(1-Ly);

    return true;

  }
  else if (elem.eType() == Element::E3T)
  {

    /*
        So - we're interpolating a 3-noded triangle.
        The query point must be within the bounding box of this triangle but not necessarily
        within the triangle itself.
      */

    /*
      First determine if the point of interest lies within the triangle using Barycentric techniques
      described here:  http://www.blackpawn.com/texts/pointinpoly/
      */

    const Node* nodes = projectedNodes();

    double lam1, lam2, lam3;
    if (!E3T_physicalToBarycentric(nodes[elem.p(0)].toPointF(),
                                   nodes[elem.p(1)].toPointF(),
                                   nodes[elem.p(2)].toPointF(),
                                   QPointF(x, y),
                                   lam1, lam2, lam3))
      return false;

    // Now interpolate

    double z1 = accessor->value( elem.p(0));
    double z2 = accessor->value( elem.p(1));
    double z3 = accessor->value( elem.p(2));
    *value = lam1 * z3 + lam2 * z2 + lam3 * z1;
    return true;

  }
  else if (elem.eType() == Element::E2L)
  {

    /*
        So - we're interpolating a 2-noded line
    */
    const Node* nodes = projectedNodes();
    double lam;

    if (!E2L_physicalToLogical(nodes[elem.p(0)].toPointF(),
                               nodes[elem.p(1)].toPointF(),
                               QPointF(x, y),
                               lam))
      return false;

    // Now interpolate

    double z1 = accessor->value( elem.p(0) );
    double z2 = accessor->value( elem.p(1) );

    *value = z1 + lam * (z2 - z1);
    return true;

  }
  else
  {
    Q_ASSERT(0 && "unknown element type");
    return false;
  }
}
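In the node-based `interpolate` above, each element type reduces to a standard weighted blend of nodal values: ENP uses generalized barycentric weights, $v = \sum_i \lambda_i z_i$; E3T the triangle barycentric combination $v = \lambda_1 z_3 + \lambda_2 z_2 + \lambda_3 z_1$ (weights and vertices paired as in the code); E4Q the bilinear blend $v = q_{11}L_xL_y + q_{21}(1-L_x)L_y + q_{12}L_x(1-L_y) + q_{22}(1-L_x)(1-L_y)$ in logical coordinates $(L_x, L_y) \in [0,1]^2$; and E2L the linear blend $v = z_1 + \lambda(z_2 - z_1)$. The element-centered variant in Example #23 performs only the point-in-element test and returns the single per-element value.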
Example #25
 double PoissonModel::pdf(const Data * dp, bool logscale) const{
   return dpois(DAT(dp)->value(), lam(), logscale); }
Example #26
 double PoissonModel::mean()const{return lam();}
Example #27
 double PoissonModel::var()const{return lam();}
Example #28
 double PoissonModel::sd()const{return sqrt(lam());}
Example #29
 double PoissonModel::simdat()const{
   return rpois(lam());}
Example #30
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");
  
  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");
  
  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2, "Wrong number of input arguments to F for parametric NLP. Must be 2, but got " << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1, "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got " << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2, "Wrong number of input arguments to F. Must be 1 or 2");
  
  if (F_.getNumInputs()==2) parametric_=true;
  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
     "To avoid confusion, the input argument to F must be vector. You supplied " << F_.input().dimString() << endl <<
     " We suggest you make the following changes:" << endl <<
     "   -  F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     "   -  F is an MXFunction:  MXFunction([X],[rhs]) -> " <<  endl <<
     "                                     X_vec = MX(\"X\",vec(X.sparsity())) " << endl <<
     "                                     F_vec = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]]) " << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl
  );
  
  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_  || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(), "Input argument of F must be dense. You supplied " << F_.input().dimString());
  
  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2, "Wrong number of input arguments to G for parametric NLP. Must be 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1, "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(), "F and G input dimension must match. F " << F_.input().dimString() << ". G " << G_.input().dimString());
  }
  
  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");
    
    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }
  
  
  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");
    
    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }
  
  // Find out if we are to generate the Hessian of the Lagrangian
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported");
    log("generating hessian");
    
    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();
      
      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();
      
      // Lagrange multipliers
      MX lam("lam",0);
      
      // Objective function scaling
      MX sigma("sigma");
      
      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);
      
      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");
      
    } else { // G_.isNull()
      
      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);
      
      // Efficient if both functions are SXFunction
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expression for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();
        
        // Numeric hessian
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");
        
        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");
        
        // Substitute symbolic variables in f if different input variables from g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }
        
        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");        
        
        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4: 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);
        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();
        
        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");
        
      } else { // !F_sx.isNull() && !G_sx.isNull()
        // Check if the functions are MXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);
        
        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");
          
          // Expression for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();
          
          // Lagrange multipliers
          MX lam("lam",g.size1());
      
          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4:3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in,sigma*f+ inner_prod(lam,g));
          lfcn.init();
	  log("SX Lagrangian function generated");
          
/*          cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl;*/
      
          bool adjoint_mode = true;
          if(adjoint_mode){
          
            // Gradient of the lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");

            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
//           cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");
            
            // Uni-directional coloring (note, the hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");
            
            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {

            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
            
          }
          log("MX Lagrangian Hessian function generated");
          
        } else {
          casadi_assert_message(0, "Automatic calculation of exact Hessian currently only for F and G both SXFunction or MXFunction ");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()
  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if it does not already exists
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();
    
    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }
    
  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  
  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2, "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1, "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2, "Wrong number of input arguments to J for parametric NLP. Must be at least 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1, "Wrong number of input arguments to J for non-parametric NLP. Must be at least 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();
    
    if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while G has got " << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while H has got " << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while J has got " << J_->input(J_->getNumInputs()-1).dimString());
  }
  
  // Infinity
  double inf = numeric_limits<double>::infinity();
  
  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);
  
  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)      = DMatrix(n_,1,0);
  output(NLP_COST)       = DMatrix(1,1,0);
  output(NLP_LAMBDA_X)   = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G)   = DMatrix(m_,1,0);
  output(NLP_G)          = DMatrix(m_,1,0);
  
  if (hasSetOption("iteration_callback")) {
   callback_ = getOption("iteration_callback");
   if (!callback_.isNull()) {
     if (!callback_.isInit()) callback_.init();
     casadi_assert_message(callback_.getNumOutputs()==1, "Callback function should have one output, a scalar that indicates wether to break. 0 = continue");
     casadi_assert_message(callback_.output(0).size()==1, "Callback function should have one output, a scalar that indicates wether to break. 0 = continue");
     casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT, "Callback function should have the output scheme of NLPSolver as input scheme. i.e. " <<NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided." );
     for (int i=0;i<NLP_NUM_OUT;i++) {
       casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
         "Callback function should have the output scheme of NLPSolver as input scheme. " << 
         "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) <<  " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be " << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << "."
       );
       callback_.input(i).setAll(0);
     }
   }
  }
  
  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
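The Hessian-generation branches above all build the scaled Lagrangian that appears literally in the code as `sigma*f + inner_prod(lam,g)`,

$$\mathcal{L}(x, \lambda, \sigma) = \sigma\, f(x) + \lambda^\top g(x),$$

and differentiate it twice with respect to $x$: directly via `hessian()` in the SX and unconstrained cases, or, in the experimental MX adjoint-mode branch, as the Jacobian of the Lagrangian gradient $\nabla_x \mathcal{L}$, with a unidirectional coloring of the (symmetric) Hessian sparsity used to limit the number of forward directions.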