Code example #1
/*
 * Estimate the brightness of a cluster using a General Weighted Least Squares approach
 * (actually a single step of an Iteratively reWeighted Least Squares procedure).
 * Unlike the routine in the original AYB code, this uses processed intensities.
 * p            Matrix of processed intensities
 * base         Array of base calls, one per cycle
 * oldlambda    Previous estimate of lambda
 * v            Array of cycle specific scales, length ncycle
 * V            Array of cycle specific variances
 */
real_t estimate_lambdaGWLS( const MAT p, const NUC * base, const real_t oldlambda, const real_t * v, const MAT * V) {
    validate(NULL!=p,NAN);
    validate(NBASE==p->nrow,NAN);
    validate(NULL!=base,NAN);
    validate(oldlambda>=0.,NAN);
    validate(NULL!=v,NAN);
    validate(NULL!=V,NAN);
    const uint32_t ncycle = p->ncol;

    real_t numerator = 0.0, denominator=0.0;
    for (uint32_t cycle=0 ; cycle<ncycle ; cycle++) {
        const int cybase = base[cycle];

        // p^t %*% Omega %*% p
        real_t pVp = xMy(p->x+cycle*NBASE,V[cycle],p->x+cycle*NBASE);
        // p^t %*% Omega %*% base
        real_t pVb = 0.0;
        for ( uint32_t i=0 ; i<NBASE ; i++) {
            pVb += p->x[cycle*NBASE+i] * V[cycle]->x[cybase*NBASE+i];
        }

        // Calculate Sum Squared Error, then weight
        real_t sse = pVp - 2.0 * oldlambda * pVb + oldlambda*oldlambda*V[cycle]->x[cybase*NBASE+cybase];
        real_t w = cauchy(sse,v[cycle]);

        // Accumulate
        numerator   += w * pVb;
        denominator += w * V[cycle]->x[cybase*NBASE+cybase];
    }

    real_t lambda = numerator / denominator;
    return (lambda>aybopt.min_lambda)?lambda:aybopt.min_lambda;
}
Code example #2
/*
 * Estimate the brightness of a cluster using a Weighted Least Squares approach
 * (actually a single step of an Iteratively reWeighted Least Squares procedure).
 * Unlike the routine in the original AYB code, this uses processed intensities.
 * p            Matrix of processed intensities
 * base         Array of base calls, one per cycle
 * oldlambda    Previous estimate of lambda
 * v            Array of cycle- and base-specific scales, length NBASE*ncycle
 */
real_t estimate_lambdaWLS( const MAT p, const NUC * base, const real_t oldlambda, const real_t * v) {
    validate(NULL!=p,NAN);
    validate(NBASE==p->nrow,NAN);
    validate(NULL!=base,NAN);
    validate(oldlambda>=0.,NAN);
    validate(NULL!=v,NAN);
    const uint32_t ncycle = p->ncol;

    real_t numerator = 0.0, denominator=0.0;
    for (uint32_t cycle=0 ; cycle<ncycle ; cycle++) {
        const int cybase = base[cycle];
        // Calculate Sum Squared Error, then weight
        real_t sse = 0.0;
        for ( int j=0 ; j<NBASE ; j++) {
            sse += p->x[cycle*NBASE+j] * p->x[cycle*NBASE+j];
        }
        sse -= 2.0 * oldlambda * p->x[cycle*NBASE+cybase];
        sse += oldlambda*oldlambda;
        real_t w = cauchy(sse,v[cycle*4+cybase]);
        //real_t w = 1.0/v[cycle*4+cybase];
        // Accumulate
        numerator += p->x[cycle*NBASE+cybase] * w;
        denominator += w;
    }

    real_t lambda = numerator / denominator;
    return (lambda>aybopt.min_lambda)?lambda:aybopt.min_lambda;
}
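In both routines above, cauchy(sse, scale) acts as a robust IRLS weight: observations with a large squared error contribute less to the numerator and denominator. The actual AYB implementation is not shown here; a minimal sketch, assuming the standard Cauchy (Lorentzian) influence weight w(r) = 1/(1 + r/scale):

/* Hypothetical sketch of the cauchy() weight used above (assumption:
 * the standard Cauchy IRLS weight, not the actual AYB routine). */
static real_t cauchy(const real_t sse, const real_t scale) {
    return 1.0 / (1.0 + sse / scale);
}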
Code example #3
File: cauchy.cpp  Project: atyuwen/nt2
NT2_TEST_CASE_TPL ( cauchy_ints, NT2_REAL_TYPES)
{
  NT2_DISPLAY(nt2::cauchy(3, 3, nt2::meta::as_<T>()));
  nt2::table<T> a0 = nt2::_(T(1), 3);
  NT2_DISPLAY(nt2::cauchy(a0));

  nt2::table<T> v = cauchy(a0);
  NT2_DISPLAY(v);
  nt2::table<T> z = nt2::rec(nt2::cif(3, nt2::meta::as_<T>())+nt2::rif(3, nt2::meta::as_<T>()));
  NT2_TEST_EQUAL(v, z);
}
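The test checks that nt2::cauchy reproduces the classical Cauchy matrix: with 1-based row index i and column index j, entry (i, j) is 1/(i + j), which is exactly the reference value rec(cif + rif) built above. A standalone sketch of the expected 3x3 values:

#include <stdio.h>

/* Prints the 3x3 Cauchy matrix with entries 1/(i + j), 1-based indices,
 * i.e. the matrix z that NT2_TEST_EQUAL compares against above. */
int main(void) {
    for (int i = 1; i <= 3; i++) {
        for (int j = 1; j <= 3; j++)
            printf("%8.4f ", 1.0 / (i + j));
        printf("\n");
    }
    return 0;
}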
Code example #4
double geomCovariance(double *dist, int n, int dim, int covmod,
		      double sigma2, double sigma2Bound, double nugget,
		      double range, double smooth, double smooth2,
		      double *rho){

  //This function computes the geometric Gaussian covariance function
  //between each pair of locations.
  //When ans != 0.0, the parameters are ill-defined.
  const double twiceSigma2 = 2 * sigma2;
  double ans = 0.0;

  if (sigma2 <= 0)
    return (1 - sigma2) * (1 - sigma2) * MINF;

  if (sigma2 > sigma2Bound)
    return (sigma2Bound - 1 - sigma2) * (sigma2Bound - 1 - sigma2) * MINF;

  switch (covmod){
  case 1:
    ans = whittleMatern(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 2:
    ans = cauchy(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 3:
    ans = powerExp(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 4:
    ans = bessel(dist, n, dim, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 5:
    ans = caugen(dist, n, nugget, 1 - nugget, range, smooth, smooth2, rho);
    break;
  }

  if (ans != 0.0)
    return ans;

  #pragma omp parallel for
  for (int i=0;i<n;i++)
    rho[i] = sqrt(twiceSigma2 * (1 - rho[i]));

  return ans;
}
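Here cauchy() (case 2) fills rho with correlations from the Cauchy family before the geometric transform sqrt(2*sigma2*(1 - rho)) is applied. A minimal sketch of that family, assuming the common parameterisation rho(h) = sill * (1 + (h/range)^2)^(-smooth) and a nonzero return value as the ill-defined-parameter flag (the actual SpatialExtremes routine may differ in details):

#include <math.h>

/* Sketch of a Cauchy correlation fill (assumed parameterisation,
 * illustration only). The nugget contributes only at distance zero,
 * which pairwise distance vectors exclude. */
static double cauchy_sketch(const double *dist, int n, double nugget,
                            double sill, double range, double smooth,
                            double *rho) {
    if ((range <= 0) || (smooth <= 0) || (sill <= 0) || (nugget < 0))
        return 1.0; /* nonzero flag: parameters are ill-defined */
    for (int i = 0; i < n; i++)
        rho[i] = sill * pow(1 + dist[i] * dist[i] / (range * range), -smooth);
    return 0.0;
}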
Code example #5
File: cpoly.cpp  Project: thomasklau/simbody
int CPoly<T>::findRoots( const T *opr, const T *opi, int degree, T *zeror, T *zeroi )
   {
   int cnt1, cnt2, idnn2, i, conv;
   T xx, yy, cosr, sinr, smalno, base, xxx, zr, zi, bnd;

   mcon( &eta, &infin, &smalno, &base );
   are = eta;
   mre = (T) (2.0 * sqrt( 2.0 ) * eta);
   xx = (T) 0.70710678;
   yy = -xx;
   cosr = (T) -0.060756474;
   sinr = (T) -0.99756405;
   nn = degree;

   // Algorithm fails if the leading coefficient is zero, or degree is zero.
   if( nn < 1 || (opr[ 0 ] == 0 && opi[ 0 ] == 0) )
      return -1;


   // Remove the zeros at the origin if any
   while( opr[ nn ] == 0 && opi[ nn ] == 0 )
      {
      idnn2 = degree - nn;
      zeror[ idnn2 ] = 0;
      zeroi[ idnn2 ] = 0;
      nn--;
      }

   // sherm 20130410: If all coefficients but the leading one were zero, then
   // all solutions are zero; should be a successful (if boring) return.
   if (nn == 0)
      return degree;

   // Allocate arrays
   pr = new T [ degree+1 ];
   pi = new T [ degree+1 ];
   hr = new T [ degree+1 ];
   hi = new T [ degree+1 ];
   qpr= new T [ degree+1 ];
   qpi= new T [ degree+1 ];
   qhr= new T [ degree+1 ];
   qhi= new T [ degree+1 ];
   shr= new T [ degree+1 ];
   shi= new T [ degree+1 ];

   // Make a copy of the coefficients
   for( i = 0; i <= nn; i++ )
      {
      pr[ i ] = opr[ i ];
      pi[ i ] = opi[ i ];
      shr[ i ] = cmod( pr[ i ], pi[ i ] );
      }

   // Scale the polynomial
   bnd = scale( nn, shr, eta, infin, smalno, base );
   if( bnd != 1 )
      for( i = 0; i <= nn; i++ )
         {
         pr[ i ] *= bnd;
         pi[ i ] *= bnd;
         }

search:
   if( nn <= 1 )
      {
      cdivid( -pr[ 1 ], -pi[ 1 ], pr[ 0 ], pi[ 0 ], &zeror[ degree-1 ], &zeroi[ degree-1 ] );
      goto finish;
      }

   // Calculate bnd, a lower bound on the modulus of the zeros
   for( i = 0; i<= nn; i++ )
      shr[ i ] = cmod( pr[ i ], pi[ i ] );

   cauchy( nn, shr, shi, &bnd );

   // Outer loop to control 2 Major passes with different sequences of shifts
   for( cnt1 = 1; cnt1 <= 2; cnt1++ )
      {
      // First stage calculation, no shift
      noshft( 5 );

      // Inner loop to select a shift
      for( cnt2 = 1; cnt2 <= 9; cnt2++ )
         {
         // Shift is chosen with modulus bnd and amplitude rotated by 94 degrees from the previous shift
         xxx = cosr * xx - sinr * yy;
         yy = sinr * xx + cosr * yy;
         xx = xxx;
         sr = bnd * xx;
         si = bnd * yy;

         // Second stage calculation, fixed shift
         fxshft( 10 * cnt2, &zr, &zi, &conv );
         if( conv )
            {
            // The second stage jumps directly to the third stage iteration
            // If successful the zero is stored and the polynomial deflated
            idnn2 = degree - nn;
            zeror[ idnn2 ] = zr;
            zeroi[ idnn2 ] = zi;
            nn--;
            for( i = 0; i <= nn; i++ )
               {
               pr[ i ] = qpr[ i ];
               pi[ i ] = qpi[ i ];
               }
            goto search;
            }
         // If the iteration is unsuccessful another shift is chosen
         }
      // if 9 shifts fail, the outer loop is repeated with another sequence of shifts
      }

   // The zerofinder has failed on two major passes.
   // Return empty-handed with the number of roots found (less than the original degree).
   degree -= nn;

finish:
   // Deallocate arrays
   delete [] pr;
   delete [] pi;
   delete [] hr;
   delete [] hi;
   delete [] qpr;
   delete [] qpi;
   delete [] qhr;
   delete [] qhi;
   delete [] shr;
   delete [] shi;

   return degree;
   }
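In this Jenkins-Traub style solver, cauchy(nn, shr, shi, &bnd) derives a bound on the root moduli from the coefficient moduli in shr, which is then used as the modulus of the trial shifts. The exact routine (a Newton iteration on the modulus polynomial) is not shown; for intuition, the classical Cauchy bound, which says every zero z of a_0 z^n + ... + a_n satisfies |z| <= 1 + max_{i>0} |a_i/a_0|, can be sketched as:

/* Classical Cauchy upper bound on root moduli (illustration only; the
 * cpoly routine instead computes a lower bound on the smallest zero). */
static double cauchy_upper_bound(const double *mod, int degree) {
    /* mod[0..degree] holds |a_0| .. |a_degree|, with mod[0] != 0 */
    double maxRatio = 0.0;
    for (int i = 1; i <= degree; i++)
        if (mod[i] / mod[0] > maxRatio)
            maxRatio = mod[i] / mod[0];
    return 1.0 + maxRatio;
}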
Code example #6
File: random-inl.hpp  Project: mapleyustat/Thunder
T& Random< T, G, I, F >::cauchy(T &t, F a, F b) {
  return const_cast< T& >(cauchy(const_cast< const T& >(t), a, b));
}
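The non-const overload simply forwards to the const version through const_cast, so the sampling logic lives in one place. For context, drawing a Cauchy(a, b) variate is usually done with the inverse-CDF transform; a self-contained sketch (assumption: Thunder's actual generator G is not shown here):

#include <math.h>
#include <stdlib.h>

/* Inverse-CDF sampling: if U ~ Uniform(0,1), then
 * a + b * tan(pi * (U - 0.5)) ~ Cauchy(location a, scale b). */
static double rcauchy_sketch(double a, double b) {
    double u = (rand() + 0.5) / ((double)RAND_MAX + 1.0); /* in (0,1) */
    return a + b * tan(M_PI * (u - 0.5));
}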
Code example #7
void latentgev(int *n, double *data, int *nSite, int *nObs, int *covmod,
	       int *dim, double *distMat, double *dsgnMat, int *nBeta,
	       double *beta, double *sills, double *ranges, double *smooths,
	       double *gevParams, double *hyperSill, double *hyperRange,
	       double *hyperSmooth, double *hyperBetaMean,
	       double *hyperBetaIcov, double *propGev, double *propRanges,
	       double *propSmooths, double *mcLoc, double *mcScale,
	       double *mcShape, double *accRates, double *extRates, int *thin,
	       int *burnin){


  int iter = 0, iterThin = 0, idxSite, idxSite2, idxMarge, idxBeta, info = 0,
    oneInt = 1, nSite2 = *nSite * *nSite,
    nPairs = *nSite * (*nSite + 1) / 2,
    *cumBeta = (int *) R_alloc(4, sizeof(int)),
    *cumBeta2 = (int *) R_alloc(3, sizeof(int)),
    *nBeta2 = (int *) R_alloc(3, sizeof(int)),
    lagLoc = nBeta[0] + 3 + *nSite, lagScale = nBeta[1] + 3 + *nSite,
    lagShape = nBeta[2] + 3 + *nSite;

  cumBeta[0] = 0;
  cumBeta[1] = nBeta[0];
  cumBeta[2] = nBeta[0] + nBeta[1];
  cumBeta[3] = cumBeta[2] + nBeta[2];
  cumBeta2[0] = 0;
  cumBeta2[1] = nBeta[0] * nBeta[0];
  cumBeta2[2] = nBeta[0] * nBeta[0] + nBeta[1] * nBeta[1];
  nBeta2[0] = nBeta[0] * nBeta[0];
  nBeta2[1] = nBeta[1] * nBeta[1];
  nBeta2[2] = nBeta[2] * nBeta[2];

  double one = 1.0, zero = 0.0, flag = 0.0, logDetProp,
    *logDet = (double *) R_alloc(3, sizeof(double)),
    *covMatChol = (double *) R_alloc(3 * nSite2, sizeof(double)),
    *GPmean = (double *) R_alloc(3 * *nSite, sizeof(double)),
    *resTop = (double *) R_alloc(*nSite, sizeof(double)),
    *resBottom = (double *) R_alloc(*nSite, sizeof(double)),
    *covariances = (double *) R_alloc(nPairs, sizeof(double)),
    *proposalGEV = (double *) R_alloc(3, sizeof(double)),
    *covMatPropChol = (double *) R_alloc(nSite2, sizeof(double));

  for (int i=3;i--;)
    logDet[i] = 0;

  for (int i=(3 * nSite2);i--;)
    covMatChol[i] = 0;

  for (int i=(3 * *nSite);i--;)
    GPmean[i] = 0;

  for (int i=nSite2;i--;)
    covMatPropChol[i] = 0;

  /*----------------------------------------------------*/
  /*                                                    */
  /*           Compute some initial objects             */
  /*                                                    */
  /*----------------------------------------------------*/

  // a. The inverse of the covariance matrices
  for (idxMarge=0;idxMarge<3;idxMarge++){

    switch(covmod[idxMarge]){
    case 1:
      flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			   smooths[idxMarge], covariances);
      break;
    case 2:
      flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		    smooths[idxMarge], covariances);
      break;
    case 3:
      flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
      break;
    case 4:
      flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		    smooths[idxMarge], covariances);
      break;
    }

    if (flag != 0)
      error("The starting values (covariance parameter) are ill-defined. Please check\n");

    /* We need to fill in the upper triangular part of covMatChol with
       covariances */
    {
      int current=-1;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	  current++;
	  covMatChol[idxSite + idxSite2 * *nSite + idxMarge * nSite2] = covariances[current];
	}
    }

    // Finally compute its Cholesky decomposition
    F77_CALL(dpotrf)("U", nSite, covMatChol + idxMarge * nSite2, nSite, &info);

    if (info != 0)
      error("Impossible to get the Cholesky decomp. from the starting values\n");

    /* Compute the log of the determinant of the proposal
       cov. mat. using the sum of the square of the diagonal elements of
       the Cholesky decomposition */
    for (idxSite2=0;idxSite2<*nSite;idxSite2++)
      logDet[idxMarge] += log(covMatChol[idxSite2 * (*nSite + 1) + idxMarge *
					 nSite2]);

    logDet[idxMarge] *= 2;
  }

  // b. The mean of the Gaussian processes
  for (idxMarge=0;idxMarge<3;idxMarge++)
    for (idxSite=0;idxSite<*nSite;idxSite++)
      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	GPmean[idxSite + idxMarge * *nSite] +=
	  dsgnMat[idxBeta * *nSite + idxSite + cumBeta[idxMarge] * *nSite] *
	  beta[cumBeta[idxMarge] + idxBeta];

  // c. Some constant related to the conjugate distributions
  double *conjMeanCst = (double *)R_alloc(cumBeta[3], sizeof(double));
  for(int i=cumBeta[3];i--;)
    conjMeanCst[i]=0;

  for (idxMarge=0;idxMarge<3;idxMarge++)
    F77_CALL(dsymv)("U", nBeta + idxMarge, &one, hyperBetaIcov +
		    cumBeta2[idxMarge], nBeta + idxMarge, hyperBetaMean +
		    cumBeta[idxMarge], &oneInt, &zero, conjMeanCst + cumBeta[idxMarge],
		    &oneInt);

  /*----------------------------------------------------*/
  /*                                                    */
  /*               Starting the MCMC algo               */
  /*                                                    */
  /*----------------------------------------------------*/

  GetRNGstate();
  while (iterThin<*n){

    /*----------------------------------------------------*/
    /*                                                    */
    /*           Updating the GEV parameters              */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxSite=0;idxSite<*nSite;idxSite++){
      for (idxMarge=0;idxMarge<3;idxMarge++){
	double logpropRatio = 0;
	proposalGEV[0] = gevParams[idxSite];
	proposalGEV[1] = gevParams[*nSite + idxSite];
	proposalGEV[2] = gevParams[2 * *nSite + idxSite];

	if (idxMarge==1){
	  proposalGEV[1] = rlnorm(log(gevParams[*nSite + idxSite]), propGev[1]);
	  logpropRatio = log(proposalGEV[1] / gevParams[*nSite + idxSite]);
	}

	else
	  proposalGEV[idxMarge] = rnorm(gevParams[idxMarge * *nSite + idxSite], propGev[idxMarge]);

	double topGEV = 0, bottomGEV = 0;
	gevlik(data + idxSite * *nObs, nObs, proposalGEV, proposalGEV + 1,
	       proposalGEV + 2, &topGEV);

	if (topGEV == -1e6){
	  extRates[idxMarge]++;
	  continue;
	}

	gevlik(data + idxSite * *nObs, nObs, gevParams + idxSite, gevParams +
	       *nSite + idxSite, gevParams + 2 * *nSite + idxSite, &bottomGEV);

	double topGP = 0, bottomGP = 0;
	for (idxSite2=0;idxSite2<*nSite;idxSite2++)
	  resBottom[idxSite2] = gevParams[idxSite2 + idxMarge * *nSite] -
	    GPmean[idxSite2 + idxMarge * *nSite];

	memcpy(resTop, resBottom, *nSite * sizeof(double));
	resTop[idxSite] = proposalGEV[idxMarge] - GPmean[idxSite + idxMarge *
							 *nSite];

	F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
			idxMarge * nSite2, nSite, resTop, nSite);
	F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
			idxMarge * nSite2, nSite, resBottom, nSite);

	for (idxSite2=0;idxSite2<*nSite;idxSite2++){
	  topGP += resTop[idxSite2] * resTop[idxSite2];
	  bottomGP += resBottom[idxSite2] * resBottom[idxSite2];
	}

	topGP *= -0.5;
	bottomGP *= -0.5;

	if (unif_rand() < exp(topGEV - bottomGEV + topGP - bottomGP +
			      logpropRatio)){
	  gevParams[idxSite + idxMarge * *nSite] = proposalGEV[idxMarge];
	  accRates[idxMarge]++;
	}
      }
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*        Updating the regression parameters          */
    /*                (conjugate prior)                   */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){

      /* conjCovMat is the covariance matrix for the conjugate
	 distribution i.e. MVN

	 conjCovMatChol is its Cholesky decomposition */
      double *dummy = malloc(*nSite * nBeta[idxMarge] * sizeof(double)),
	*conjCovMat = malloc(nBeta2[idxMarge] * sizeof(double)),
	*conjCovMatChol = malloc(nBeta2[idxMarge] * sizeof(double));

      memcpy(conjCovMat, hyperBetaIcov + cumBeta2[idxMarge],
	     nBeta2[idxMarge] * sizeof(double));
      memcpy(dummy, dsgnMat + *nSite * cumBeta[idxMarge],
	     *nSite * nBeta[idxMarge] * sizeof(double));

      // Compute dummy = covMatChol^(-T) %*% dsgnMat
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, nBeta + idxMarge, &one,
		      covMatChol + idxMarge * nSite2, nSite, dummy, nSite);

      /* Compute conjCovMat = dummy^T %*% dummy + conjCovMat

	 WARNING: Only the upper diagonal elements will be stored */
      F77_CALL(dsyrk)("U", "T", nBeta + idxMarge, nSite, &one, dummy, nSite,
		      &one, conjCovMat, nBeta + idxMarge);

      /* Rmk: The "real" conjugate cov. matrix is the inverse of
	 conjCovMat but it is not necessary to compute it */

      //Compute its Cholesky decomposition
      memcpy(conjCovMatChol, conjCovMat, nBeta2[idxMarge] * sizeof(double));
      F77_CALL(dpotrf)("U", nBeta + idxMarge, conjCovMatChol, nBeta + idxMarge,
		       &info);

      // Compute dummy2 = covMatChol^(-T) %*% (locs or scales or shapes)
      double *dummy2 = malloc(*nSite * sizeof(double));
      memcpy(dummy2, gevParams + idxMarge * *nSite, *nSite * sizeof(double));
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, dummy2, nSite);

      // conjMean is the mean for the conjugate distribution i.e. MVN
      // Set conjMean = conjMeanCst := hyperBetaIcov %*% hyperBetaMean
      double *conjMean = malloc(nBeta[idxMarge] * sizeof(double));
      memcpy(conjMean, conjMeanCst + cumBeta[idxMarge],
	     nBeta[idxMarge] * sizeof(double));

      // Compute conjMean = conjMean + dummy^T %*% dummy2 (dummy2 is a vector)
      F77_CALL(dgemv)("T", nSite, nBeta + idxMarge, &one, dummy, nSite, dummy2,
		      &oneInt, &one, conjMean, &oneInt);

      // Compute conjMean = conjCovMat^(-1) %*% conjMean
      F77_CALL(dposv)("U", nBeta + idxMarge, &oneInt, conjCovMat, nBeta +
		      idxMarge, conjMean, nBeta + idxMarge, &info);

      /* The new state is a realisation from the MVN(conjMean,
	 conjCovMat) so we simulate it from the Cholesky
	 decomposition */

      double *stdNormal = malloc(nBeta[idxMarge] * sizeof(double));
      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	stdNormal[idxBeta] = norm_rand();

      /* Rmk: Recall that conjCovMat is the precision matrix and *NOT*
	 the covariance matrix. Instead of using the Cholesky
	 decomposition of the conjugate covariance matrix (that we
	 still haven't computed), we use the inverse of the Cholesky
	 decomposition. This is different from the standard simulation
	 technique but completely equivalent since

	      iSigma = iSigma_*^T %*% iSigma_*
	 <==> Sigma := iSigma^(-1) = iSigma_*^(-1) %*% iSigma_*^(-T),

	 where iSigma_* is the Cholesky decomposition of iSigma.

	 Therefore we can use iSigma_*^(-1) for the simulation. */
      F77_CALL(dtrsm)("L", "U", "N", "N", nBeta + idxMarge, &oneInt,
		      &one, conjCovMatChol, nBeta + idxMarge, stdNormal,
		      nBeta + idxMarge);

      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	beta[cumBeta[idxMarge] + idxBeta] = stdNormal[idxBeta] +
	  conjMean[idxBeta];

      //The last step is to update the mean of the GP
      for (idxSite=0;idxSite<*nSite;idxSite++){
	GPmean[idxSite + idxMarge * *nSite] = 0;

	for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	  GPmean[idxSite + idxMarge * *nSite] += dsgnMat[idxBeta * *nSite + idxSite +
							 cumBeta[idxMarge] * *nSite] *
	    beta[cumBeta[idxMarge] + idxBeta];
      }

      free(dummy);
      free(conjCovMat);
      free(conjCovMatChol);
      free(dummy2);
      free(conjMean);
      free(stdNormal);
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*        Updating the sills (conjugate prior)        */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      for (idxSite=0;idxSite<*nSite;idxSite++)
	resTop[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
	  GPmean[idxSite + idxMarge * *nSite];

      // Compute resTop = covMatChol^(-T) %*% resTop
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resTop, nSite);

      double shape = 0.5 * *nSite + hyperSill[2 * idxMarge];
      double scale = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	scale += resTop[idxSite] * resTop[idxSite];

      scale = hyperSill[1 + 2 * idxMarge] + 0.5 * sills[idxMarge] * scale;

      /* Rmk: If Y ~ Gamma(shape = shape, rate = 1 / scale) then X :=
	 1 / Y \sim IGamma(shape = shape, scale = scale) */
      sills[idxMarge] = 1 / rgamma(shape,  1 / scale);

      // Now we need to update the covariance matrix and its inverse
      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			     smooths[idxMarge], covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			smooths[idxMarge], covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
	break;
      }

      /* We need to fill in the upper triangular part of covMatChol with
	 covariances */
      {
	int current=-1;
	for (idxSite=0;idxSite<*nSite;idxSite++)
	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	    current++;
	    covMatChol[idxSite + idxSite2 * *nSite + idxMarge * nSite2] = covariances[current];
	  }
      }

      // Cholesky decomposition of the covariance matrices
      F77_CALL(dpotrf)("U", nSite, covMatChol + idxMarge * nSite2, nSite,
		       &info);

      // Compute the log of the determinant of the proposal cov. mat.
      logDet[idxMarge] = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	logDet[idxMarge] += log(covMatChol[idxSite * (1 + *nSite) + idxMarge *
					   nSite2]);

      logDet[idxMarge] *= 2;
    }


    /*----------------------------------------------------*/
    /*                                                    */
    /*          Updating the ranges (M.-H. step)          */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      if (propRanges[idxMarge] == 0)
	continue;

      double rangeProp = rlnorm(log(ranges[idxMarge]), propRanges[idxMarge]),
	logpropRatio = log(rangeProp / ranges[idxMarge]);

      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], rangeProp,
			     smooths[idxMarge], covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], rangeProp,
		      smooths[idxMarge], covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], rangeProp,
			smooths[idxMarge], covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], rangeProp,
		      smooths[idxMarge], covariances);
	break;
      }

      if (flag != 0){
	extRates[3 + idxMarge]++;
	continue;
      }

      /* We need to fill in the upper triangular part of covMatPropChol
	 with covariances */
      {
	int current=-1;
	for (idxSite=0;idxSite<*nSite;idxSite++)
	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	    current++;
	    covMatPropChol[idxSite + idxSite2 * *nSite] = covariances[current];
	  }
      }

      // Cholesky decomposition of the proposal cov. mat.
      F77_CALL(dpotrf)("U", nSite, covMatPropChol, nSite, &info);

      if (info != 0){
	extRates[3 + idxMarge]++;
	continue;
      }

      // Log of the determinant of the proposal cov. mat.
      logDetProp = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	logDetProp += log(covMatPropChol[idxSite * (1 + *nSite)]);

      logDetProp *= 2;

      for (idxSite=0;idxSite<*nSite;idxSite++)
	resBottom[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
	  GPmean[idxSite + idxMarge * *nSite];

      memcpy(resTop, resBottom, *nSite * sizeof(double));

      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resBottom, nSite);
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatPropChol,
		      nSite, resTop, nSite);

      double top = logDetProp, bottom = logDet[idxMarge],
	logpriorRatio = (hyperRange[2 * idxMarge] - 1) *
	log(rangeProp / ranges[idxMarge]) + (ranges[idxMarge] - rangeProp) /
	hyperRange[2 * idxMarge + 1];

      for (idxSite=0;idxSite<*nSite;idxSite++){
	top += resTop[idxSite] * resTop[idxSite];
	bottom += resBottom[idxSite] * resBottom[idxSite];
      }

      top *= -0.5;
      bottom *= -0.5;

      if (unif_rand() < exp(top - bottom + logpriorRatio + logpropRatio)){
	ranges[idxMarge] = rangeProp;
	logDet[idxMarge] = logDetProp;
	memcpy(covMatChol + idxMarge * nSite2, covMatPropChol, nSite2 *
	       sizeof(double));
	accRates[3 + idxMarge]++;
      }
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*         Updating the smooths (M.-H. step)          */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      if (propSmooths[idxMarge] == 0)
	continue;

      double smoothProp = rlnorm(log(smooths[idxMarge]), propSmooths[idxMarge]),
	logpropRatio = log(smoothProp / smooths[idxMarge]);

      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			     smoothProp, covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smoothProp, covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			smoothProp, covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		      smoothProp, covariances);
	break;
      }

      if (flag != 0){
    	extRates[6 + idxMarge]++;
    	continue;
      }

      /* We need to fill in the upper triangular part of covMatPropChol
    	 with covariances */
      {
    	int current=-1;
    	for (idxSite=0;idxSite<*nSite;idxSite++)
    	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
    	    current++;
    	    covMatPropChol[idxSite + idxSite2 * *nSite] = covariances[current];
    	  }
      }

      // Cholesky decomposition of the proposal cov. mat.
      F77_CALL(dpotrf)("U", nSite, covMatPropChol, nSite, &info);

      if (info != 0){
    	extRates[6 + idxMarge]++;
    	continue;
      }

      // Log of the determinant of the proposal cov. mat.
      logDetProp = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
    	logDetProp += log(covMatPropChol[idxSite * (1 + *nSite)]);

      logDetProp *= 2;

      for (idxSite=0;idxSite<*nSite;idxSite++)
    	resBottom[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
    	  GPmean[idxSite + idxMarge * *nSite];

      memcpy(resTop, resBottom, *nSite * sizeof(double));

      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatPropChol,
		      nSite, resTop, nSite);
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resBottom, nSite);

      double top = logDetProp, bottom = logDet[idxMarge],
    	logpriorRatio = (hyperSmooth[2 * idxMarge] - 1) *
    	log(smoothProp / smooths[idxMarge]) + (smooths[idxMarge] - smoothProp) /
    	hyperSmooth[2 * idxMarge + 1];

      for (idxSite=0;idxSite<*nSite;idxSite++){
    	top += resTop[idxSite] * resTop[idxSite];
    	bottom += resBottom[idxSite] * resBottom[idxSite];
      }

      top *= -0.5;
      bottom *= -0.5;

      if (unif_rand() < exp(top - bottom + logpriorRatio + logpropRatio)){
    	smooths[idxMarge] = smoothProp;
    	logDet[idxMarge] = logDetProp;
    	memcpy(covMatChol + idxMarge * nSite2, covMatPropChol, nSite2 *
    	       sizeof(double));
    	accRates[6 + idxMarge]++;
      }
    }

    iter++;

    //Need to store the new state into the mc object.
    if ((iter > *burnin) && ((iter % *thin) == 0)){
      mcLoc[nBeta[0] + iterThin * lagLoc] = sills[0];
      mcLoc[nBeta[0] + 1 + iterThin * lagLoc] = ranges[0];
      mcLoc[nBeta[0] + 2 + iterThin * lagLoc] = smooths[0];

      mcScale[nBeta[1] + iterThin * lagScale] = sills[1];
      mcScale[nBeta[1] + 1 + iterThin * lagScale] = ranges[1];
      mcScale[nBeta[1] + 2 + iterThin * lagScale] = smooths[1];

      mcShape[nBeta[2] + iterThin * lagShape] = sills[2];
      mcShape[nBeta[2] + 1 + iterThin * lagShape] = ranges[2];
      mcShape[nBeta[2] + 2 + iterThin * lagShape] = smooths[2];

      for (idxBeta=0;idxBeta<nBeta[0];idxBeta++)
	mcLoc[idxBeta + iterThin * lagLoc] = beta[idxBeta];

      for (idxBeta=0;idxBeta<nBeta[1];idxBeta++)
	mcScale[idxBeta + iterThin * lagScale] = beta[cumBeta[1] + idxBeta];

      for (idxBeta=0;idxBeta<nBeta[2];idxBeta++)
	mcShape[idxBeta + iterThin * lagShape] = beta[cumBeta[2] + idxBeta];

      for (idxSite=0;idxSite<*nSite;idxSite++){
	mcLoc[nBeta[0] + 3 + idxSite + iterThin * lagLoc] = gevParams[idxSite];
	mcScale[nBeta[1] + 3 + idxSite + iterThin * lagScale] = gevParams[*nSite + idxSite];
	mcShape[nBeta[2] + 3 + idxSite + iterThin * lagShape] = gevParams[2 * *nSite + idxSite];
      }
      iterThin++;
    }
  }
  PutRNGstate();

  for (int i=0;i<9;i++){
    accRates[i] /= (double) iter;
    extRates[i] /= (double) iter;
  }

  return;
}
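The regression update above samples from MVN(conjMean, iSigma^{-1}) without ever forming the covariance matrix: with iSigma = U^T U the Cholesky factorisation of the precision matrix, x = mean + U^{-1} z for z ~ N(0, I) has covariance U^{-1} U^{-T} = iSigma^{-1}, which is exactly what the final dtrsm call computes. A minimal sketch of the same identity with a hand-rolled triangular solve:

/* Sample from MVN(mean, iSigma^{-1}) given U, the upper-triangular
 * Cholesky factor of the precision matrix iSigma (column-major, n x n).
 * On entry z holds n i.i.d. N(0,1) draws; on exit it holds the sample. */
static void mvn_from_precision_chol(const double *U, const double *mean,
                                    double *z, int n) {
    for (int i = n - 1; i >= 0; i--) { /* solve U x = z by back-substitution */
        double s = z[i];
        for (int j = i + 1; j < n; j++)
            s -= U[i + j * n] * z[j];
        z[i] = s / U[i + i * n];
    }
    for (int i = 0; i < n; i++)
        z[i] += mean[i];
}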
Code example #8
File: IsotopeModel.C  Project: BioITer/OpenMS
  void IsotopeModel::setSamples(const EmpiricalFormula & formula)
  {
    typedef std::vector<DoubleReal> ContainerType;
    ContainerType isotopes_exact;

    isotope_distribution_ = formula.getIsotopeDistribution(max_isotope_);

    isotope_distribution_.trimRight(trim_right_cutoff_);
    isotope_distribution_.renormalize();

    // compute the average mass (-offset)
    CoordinateType isotopes_mean = 0;
    Int i = 0;
    for (IsotopeDistribution::iterator iter = isotope_distribution_.begin();
         iter != isotope_distribution_.end(); ++iter, ++i)
    {
      isotopes_exact.push_back(iter->second);
      isotopes_mean += iter->second * i;
    }
    isotopes_mean *= isotope_distance_ / charge_;
    // (Need not divide by sum of probabilities, which is 1.)

    ///
    // "stretch" the averagine isotope distribution (so we can add datapoints between isotope peaks)
    ///
    size_t isotopes_exact_size = isotopes_exact.size();
    isotopes_exact.resize(size_t((isotopes_exact_size - 1) * isotope_distance_ / interpolation_step_ + 1.6)); // round up a bit more

    for (Size i = isotopes_exact_size - 1; i; --i)
    {
      // we don't need to move the 0-th entry
      isotopes_exact[size_t(CoordinateType(i) * isotope_distance_ / interpolation_step_ / charge_ + 0.5)]
        =   isotopes_exact[i];
      isotopes_exact[i] = 0;
    }

    ////
    // compute the Gaussian/Cauchy distribution (to be added for widening the averagine isotope distribution)
    ////
    ContainerType peak_shape_values_y;
    // fill a container with CoordinateType points (x values)
    CoordinateType peak_width = 0.0;
    if (param_.getValue("isotope:mode:mode") == "Gaussian")
    {
      // Actual width for values in the smooth table for normal distribution
      peak_width = isotope_stdev_ * 4.0;  // MAGIC alert, num stdev for smooth table for normal distribution
      ContainerType peak_shape_values_x;
      for (DoubleReal coord = -peak_width; coord <= peak_width;
           coord += interpolation_step_)
      {
        peak_shape_values_x.push_back(coord);
      }
      // compute normal approximation at these CoordinateType points (y values)
      Math::BasicStatistics<> normal_widening_model;
      normal_widening_model.setSum(1);
      normal_widening_model.setMean(0);
      normal_widening_model.setVariance(isotope_stdev_ * isotope_stdev_);
      normal_widening_model.normalApproximation(peak_shape_values_y, peak_shape_values_x);
    }
    else if (param_.getValue("isotope:mode:mode") == "Lorentzian")
    {
      peak_width = isotope_lorentz_fwhm_ * 8.0; // MAGIC alert: Lorentzian has infinite support, but we need to stop sampling at some point: 8*FWHM
      for (DoubleReal coord = -peak_width; coord <= peak_width;
           coord += interpolation_step_)
      {
        boost::math::cauchy_distribution<double> cauchy(0., isotope_lorentz_fwhm_ / 2.0);
        double x = boost::math::pdf(cauchy, coord);
        //double y = gsl_ran_cauchy_pdf(coord, isotope_lorentz_fwhm_/2.0);
        peak_shape_values_y.push_back(x); //cauchy is using HWHM not FWHM
      }
    }

    ///
    // fold the Gaussian/Lorentzian at each averagine peak, i.e. fill linear interpolation
    ///
    const ContainerType & left = isotopes_exact;
    const ContainerType & right = peak_shape_values_y;
    ContainerType & result = interpolation_.getData();
    result.clear();

    SignedSize r_max = std::min(SignedSize(left.size() + right.size() - 1),
                                SignedSize(2 * peak_width / interpolation_step_ * max_isotope_ + 1));
    result.resize(r_max, 0);

    // we loop backwards because then the small products tend to come first
    // (for better numerics)
    for (SignedSize i = left.size() - 1; i >= 0; --i)
    {
      if (left[i] == 0)
        continue;
      for (SignedSize j = std::min(r_max - i, SignedSize(right.size())) - 1; j >= 0; --j)
      {
        result[i + j] += left[i] * right[j];
      }
    }

    monoisotopic_mz_ = mean_ - isotopes_mean;
    interpolation_.setMapping(interpolation_step_, peak_width / interpolation_step_, monoisotopic_mz_);

    //std::cerr << "mono now: " << monoisotopic_mz_ << " mono easy: " << formula.getMonoWeight()/formula.getCharge() << "\n";

    // scale data so that integral over distribution equals one
    // multiply sum by interpolation_step_ -> rectangular approximation of integral
    IntensityType factor = scaling_ / (interpolation_step_ * std::accumulate(result.begin(), result.end(), IntensityType(0)));
    for (ContainerType::iterator iter = result.begin(); iter != result.end(); ++iter)
    {
      *iter *= factor;
    }
  }
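The Lorentzian branch evaluates the Cauchy density with scale isotope_lorentz_fwhm_ / 2 because the Cauchy scale parameter is the half width at half maximum (HWHM), not the FWHM. Written out, the density used above is f(x; x0, gamma) = gamma / (pi * ((x - x0)^2 + gamma^2)); a dependency-free sketch:

#include <math.h>

/* Cauchy (Lorentzian) pdf with location x0 and HWHM gamma, the same
 * quantity boost::math::pdf(cauchy(0., fwhm/2), coord) returns above. */
static double cauchy_pdf(double x, double x0, double gamma)
{
    double d = x - x0;
    return gamma / (M_PI * (d * d + gamma * gamma));
}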
Code example #9
File: simextremalt.c  Project: cran/SpatialExtremes
void rextremaltcirc(int *nObs, int *ngrid, double *steps, int *dim,
		    int *covmod, double *nugget, double *range,
		    double *smooth, double *DoF, double *uBound, double *ans){
  /* This function generates random fields from the extremal-t model

     nObs: the number of observations to be generated
    ngrid: the number of locations along one axis
      dim: the random field is generated in R^dim
   covmod: the covariance model
     nugget: the nugget parameter
    range: the range parameter
   smooth: the smooth parameter
      DoF: the degree of freedom
   uBound: the uniform upper bound for the stoch. proc.
      ans: the generated random field */

  int i, j, k = -1, nbar = R_pow_di(*ngrid, *dim), r, m;
  const double zero = 0;
  double *rho, *irho, sill = 1 - *nugget;
    //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    double *dist = (double *)R_alloc(mbar, sizeof(double));

    notPosDef = 0;
    //Computation of the distance
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));
    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check if the circulant embedding
       matrix is positive definite */

    /* Note: The next lines are only valid for 2d random fields. They
       need to change if m_1 \neq m_2, as I suppose that m_1 = m_2 = m */
    int maxf, maxp, *iwork;
    double *work;

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] <= 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embed the covariance matrix");
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = malloc(mbar * sizeof(double)),
    *ia = malloc(mbar * sizeof(double)),
    *gp = malloc(nbar * sizeof(double));

  GetRNGstate();
  for (int i=*nObs;i--;){
    int nKO = nbar;
    double poisson = 0;

    while (nKO){
      poisson += exp_rand();
      double ipoisson = 1 / poisson,
	thresh = *uBound * ipoisson;
      
      /* We simulate one realisation of a gaussian random field with
	 the required covariance function */
      circcore(rho, a, ia, m, halfM, mdag, mdagbar, *ngrid, nbar, isqrtMbar, *nugget, gp);
      
      nKO = nbar;
      for (int j=nbar;j--;){
	double dummy = R_pow(fmax2(gp[j], 0), *DoF) * ipoisson;
	ans[j + i * nbar] = fmax2(dummy, ans[j + i * nbar]);
	nKO -= (thresh <= ans[j + i * nbar]);
      }
    }
  }
  
  PutRNGstate();
  
  //Lastly we multiply by the normalizing constant
  const double imean = M_SQRT_PI * R_pow(2, -0.5 * (*DoF - 2)) /
    gammafn(0.5 * (*DoF + 1));
  for (i=(nbar * *nObs);i--;)
    ans[i] *= imean;
  
  free(a); free(ia); free(gp);
  return;
}
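The embedding stage above relies on the fact that the eigenvalues of a circulant matrix are the discrete Fourier transform of its first row: the code FFTs the covariances in rho and rejects the embedding (notPosDef) whenever a transformed value is non-positive or has a non-negligible imaginary part. A 1-D illustration using a naive DFT instead of R's fft_work:

#include <math.h>

/* Eigenvalues of a circulant matrix with first row c[0..m-1]:
 * lambda_k = sum_j c[j] * exp(-2*pi*i*j*k / m). For a symmetric row
 * (c[j] == c[m-j]) the sine terms cancel and the lambda_k are real. */
static void circulant_eigenvalues(const double *c, int m, double *lambda) {
    for (int k = 0; k < m; k++) {
        double re = 0.0;
        for (int j = 0; j < m; j++)
            re += c[j] * cos(2.0 * M_PI * j * k / m);
        lambda[k] = re;
    }
}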
Code example #10
File: simgeometric.c  Project: cran/SpatialExtremes
void rgeomcirc(int *nObs, int *ngrid, double *steps, int *dim,
	       int *covmod, double *sigma2, double *nugget, double *range,
	       double *smooth, double *uBound, double *ans){
  /* This function generates random fields from the geometric model

     nObs: the number of observations to be generated
    ngrid: the number of locations along one axis
      dim: the random field is generated in R^dim
   covmod: the covariance model
     nugget: the nugget parameter
    range: the range parameter
   smooth: the smooth parameter
   uBound: the uniform upper bound for the stoch. proc.
      ans: the generated random field */

  int i, j, k = -1, nbar = R_pow_di(*ngrid, *dim), r, m;
  const double loguBound = log(*uBound), halfSigma2 = 0.5 * *sigma2,
    zero = 0;
  double sigma = sqrt(*sigma2), sill = 1 - *nugget, *rho, *irho, *dist;

  //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    dist = (double *)R_alloc(mbar, sizeof(double));

    notPosDef = 0;
    //Computation of the distance
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));
    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check if the circulant embedding
       matrix is positive definite */

    /* Note: The next lines are only valid for 2d random fields. They
       need to change if m_1 \neq m_2, as I suppose that m_1 = m_2 = m */
    int maxf, maxp;

    fft_factor(m, &maxf, &maxp);
    double *work = (double *)R_alloc(4 * maxf, sizeof(double));
    int *iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] <= 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embed the covariance matrix");
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = (double *)R_alloc(mbar, sizeof(double));
  double *ia = (double *)R_alloc(mbar, sizeof(double));
  
  GetRNGstate();
  for (i=*nObs;i--;){
    int nKO = nbar;
    double poisson = 0;
    
    while (nKO) {
      /* The stopping rule is reached when nKO = 0 i.e. when each site
	 satisfies the condition in Eq. (8) of Schlather (2002) */
      int j;
      double *gp = (double *)R_alloc(nbar, sizeof(double));
      
      poisson += exp_rand();
      double ipoisson = -log(poisson), thresh = loguBound + ipoisson;
      
      /* We simulate one realisation of a gaussian random field with
	 the required covariance function */
      circcore(rho, a, ia, m, halfM, mdag, mdagbar, *ngrid, nbar, isqrtMbar, *nugget, gp);
      
      nKO = nbar;
      double ipoissonMinusHalfSigma2 = ipoisson - halfSigma2;
      for (j=nbar;j--;){
	ans[j + i * nbar] = fmax2(sigma * gp[j] + ipoissonMinusHalfSigma2,
				  ans[j + i * nbar]);
	nKO -= (thresh <= ans[j + i * nbar]);
	
      }
    }
  }
  
  PutRNGstate();

  /* So far we have generated a max-stable process with standard Gumbel
     margins. Switch to unit Frechet margins */
  for (i=*nObs * nbar;i--;)
    ans[i] = exp(ans[i]);
  
  return;
}
Code example #11
File: circulant.c  Project: cran/SpatialExtremes
void circemb(int *nsim, int *ngrid, double *steps, int *dim, int *covmod,
	     double *nugget, double *sill, double *range, double *smooth,
	     double *ans){

  int i, j, k = -1, r, nbar = *ngrid * *ngrid, m;
  //irho is the imaginary part of the covariance -> 0
  double *rho, *irho;
  const double zero = 0;
  //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    double *dist = malloc(mbar * sizeof(double));

    notPosDef = 0;
    //Computation of the distance
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));

    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, *sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check if the circulant embedding
       matrix is positive definite */

    /* Note: The next lines are only valid for 2d random fields. They
       need to change if m_1 \neq m_2, as I suppose that m_1 = m_2 = m */
    int maxf, maxp, *iwork;
    double *work;

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] < 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embed the covariance matrix");

    free(dist);
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = malloc(mbar * sizeof(double)),
    *ia = malloc(mbar * sizeof(double));
    
  GetRNGstate();
  for (k=*nsim;k--;){
    
    /* ---------- Simulation from \Lambda^1/2 Q* Z ------------ */
    for (r=mdagbar;r--;){
      /* Below is the procedure 5.2.4 in Wood and Chan */

      //Computation of the cardinality of A(j)
      int j1, j2,i = r % mdag, j = r / mdag;
      double u, v;

      int card = (i != 0) * (i != halfM) + 2 * (j != 0) * (j != halfM);
      
      switch (card){
      case 3:
	//B(1) = {1}, B^c(1) = {2}
	j1 = (m - i) + m * j;
	j2 = i + m * (m - j);
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	
	//B(2) = {1,2}, B^c(2) = {0}
	j1 = (m - i) + m * (m - j);
	j2 = i + m * j;
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1]*= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2]*= u;
	ia[j2] *= -v;      
	break;
      case 1:
	//B(1) = 0, B^c(1) = {1}
	j1 = i + m * j;
	j2 = m - i + m * j;
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	break;
      case 2:
	//B(1) = 0, B^c(1) = {2}
	j1 = i + m * j;
	j2 = i + m * (m - j);
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	break;
      case 0:
	j1 = i + m * j;
	a[j1] = rho[j1] * norm_rand();
	ia[j1] = 0;
	break;      
      }
    }

    /* ---------- Computation of Q \Lambda^1/2 Q* Z ------------ */
    int maxf, maxp, *iwork;
    double *work;
    
    /* The next lines are only valid for 2d random fields. They need to
       change if m_1 \neq m_2 as here I suppose that m_1 = m_2 = m */
    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(a, ia, m, m, 1, -1, work, iwork);
    
    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(a, ia, 1, m, m, -1, work, iwork);
        
    for (i=nbar;i--;)
      ans[i + k * nbar] = isqrtMbar * a[i % *ngrid + m * (i / *ngrid)];
  }
  PutRNGstate();  
    
  if (*nugget > 0){
    int dummy = *nsim * nbar;
    double sqrtNugget = sqrt(*nugget);
    
    GetRNGstate();
    for (i=dummy;i--;)
      ans[i] += sqrtNugget * norm_rand();

    PutRNGstate();
  }

  free(a); free(ia);

  return;
}
Code example #12
File: direct.c  Project: cran/SpatialExtremes
void buildcovmat(int *nSite, int *grid, int *covmod, double *coord, int *dim,
		 double *nugget, double *sill, double *range,
		 double *smooth, double *covMat){

  int nPairs, effnSite = *nSite, zero = 0;
  const double one = 1, dzero = 0;
  double flag = 0;

  if (*grid)
    effnSite = R_pow_di(effnSite, *dim);

  nPairs = effnSite * (effnSite - 1) / 2;

  double *dist = malloc(nPairs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *coordGrid = malloc(effnSite * *dim * sizeof(double));

  if (*grid){
    //coord specifies a grid
    for (int i = 0; i < *nSite; i++)
      for (int j = 0; j < *nSite; j++){
	coordGrid[j + i * *nSite] = coord[i];
	coordGrid[*nSite * (*nSite + i) + j] = coord[j];
      }

    distance(coordGrid, dim, &effnSite, &zero, dist);
  }

  else
    //coord doesn't specify a grid
    distance(coord, dim, nSite, &zero, dist);

  switch (*covmod){
  case 1:
    flag = whittleMatern(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 2:
    flag = cauchy(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 3:
    flag = powerExp(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 4:
    flag = bessel(dist, nPairs, *dim, dzero, one, *range, *smooth, rho);
    break;
  case 6:
    if (*grid)
      flag = fbm(coordGrid, dist, *dim, effnSite, one, *range, *smooth, rho);

    else
      flag = fbm(coord, dist, *dim, effnSite, one, *range, *smooth, rho);

    break;
  }

  if (flag != 0.0)
    error("The covariance parameters seem to be ill-defined. Please check\n");

  //Fill the non-diagonal elements of the covariance matrix
  //#pragma omp parallel for
  for (int currentPair=0;currentPair<nPairs;currentPair++){
    int i = 0, j = 0;
    getSiteIndex(currentPair, effnSite, &i, &j);
    covMat[effnSite * i + j] = covMat[effnSite * j + i] = *sill * rho[currentPair];
  }

  //Fill the diagonal elements of the covariance matrix
  if (*covmod == 6){
    //Fractional brownian
    double irange2 = 1 / (*range * *range);

    if (*grid){
      for (int i = 0; i < effnSite;i++){
	covMat[i * (effnSite + 1)] = 0;

	for (int j= 0; j < *dim; j++)
	  covMat[i * (effnSite + 1)] += coordGrid[i + j * effnSite] * coordGrid[i + j * effnSite];

	covMat[i * (effnSite + 1)] = 2 * pow(covMat[i * (effnSite + 1)] * irange2, 0.5 * *smooth);
      }
    }

    else {
      for (int i = 0; i < effnSite; i++){
	covMat[i * (effnSite + 1)] = 0;

	for (int j = 0; j < *dim; j++)
	  covMat[i * (effnSite + 1)] += coord[i + j * effnSite] * coord[i + j * effnSite];

	covMat[i * (effnSite + 1)] = 2 * pow(covMat[i * (effnSite + 1)] * irange2, 0.5 * *smooth);
      }
    }
  }

  else
    for (int i = 0; i < effnSite; i++)
      covMat[i * (effnSite + 1)] = *sill + *nugget;


  free(dist); free(rho); free(coordGrid);
  return;
}
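getSiteIndex recovers the site pair (i, j) behind each linear pair index so the symmetric entries covMat[i,j] and covMat[j,i] can be filled from rho. A sketch of the assumed row-by-row enumeration of pairs i < j (illustration only, not the actual SpatialExtremes helper):

/* Map a linear pair index (pairs (i,j), i < j, enumerated row by row)
 * back to the site indices. Assumed convention, for illustration. */
static void pair_to_sites(int pair, int nSite, int *i, int *j) {
    int row = 0, rowLen = nSite - 1;
    while (pair >= rowLen) {
        pair -= rowLen;
        row++;
        rowLen--;
    }
    *i = row;
    *j = row + 1 + pair;
}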
Code example #13
File: extremalt.c  Project: cran/SpatialExtremes
void extremaltdsgnmat(int *covmod, double *data, double *dist, int *nSite, int *nObs, int *dim,
		      int *weighted, double *weights, double *locdsgnmat, double *locpenmat,
		      int *nloccoeff, int *npparloc, double *locpenalty, double *scaledsgnmat,
		      double *scalepenmat, int *nscalecoeff, int *npparscale,
		      double *scalepenalty, double *shapedsgnmat, double *shapepenmat,
		      int *nshapecoeff, int *npparshape, double *shapepenalty, int *usetempcov,
		      double *tempdsgnmatloc, double *temppenmatloc, int *ntempcoeffloc,
		      int *nppartempcoeffloc, double *temppenaltyloc, double *tempdsgnmatscale,
		      double *temppenmatscale, int *ntempcoeffscale, int *nppartempcoeffscale,
		      double *temppenaltyscale, double *tempdsgnmatshape, double *temppenmatshape,
		      int *ntempcoeffshape, int *nppartempcoeffshape, double *temppenaltyshape,
		      double *loccoeff, double *scalecoeff, double *shapecoeff,
		      double *tempcoeffloc, double *tempcoeffscale, double *tempcoeffshape,
		      double *nugget, double *range, double *smooth, double *smooth2, double *df,
		      double *dns){
  //This is the extremal t model. It's named xxxdsgnmat as either linear
  //models or p-splines are used for the GEV parameters.

  const int nPairs = *nSite * (*nSite - 1) / 2;
  int flag = usetempcov[0] + usetempcov[1] + usetempcov[2];

  double *trendlocs = malloc(*nObs * sizeof(double)),
    *trendscales = malloc(*nObs * sizeof(double)),
    *trendshapes = malloc(*nObs * sizeof(double)),
    *jac = malloc(*nSite * *nObs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *locs = malloc(*nSite * sizeof(double)),
    *scales = malloc(*nSite * sizeof(double)),
    *shapes = malloc(*nSite * sizeof(double)),
    *frech = malloc(*nSite * *nObs * sizeof(double));

  if (*df <= 0){
    *dns = (1 - *df) * (1 - *df) * MINF;
    return;
  }

  /*else if (*df >= 15){
    *dns = (*df - 14) * (*df - 14) * MINF;
    return;
    }*/

  if (*nugget >= 1){
    *dns = *nugget * *nugget * MINF;
    return;
  }

  //Stage 1: Compute the covariance at each location
  switch (*covmod){
  case 1:
    *dns = whittleMatern(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 2:
    *dns = cauchy(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 3:
    *dns = powerExp(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 4:
    *dns = bessel(dist, nPairs, *dim, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 5:
    *dns = caugen(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, *smooth2, rho);
    break;
  }

  if (*dns != 0)
    return;

  //Stage 2: Computing the GEV parameters using the design matrix
  *dns = dsgnmat2Param(locdsgnmat, scaledsgnmat, shapedsgnmat, loccoeff, scalecoeff, shapecoeff,
		       *nSite, *nloccoeff, *nscalecoeff, *nshapecoeff, locs, scales, shapes);

  if (flag){
    dsgnmat2temptrend(tempdsgnmatloc, tempdsgnmatscale, tempdsgnmatshape, tempcoeffloc,
		      tempcoeffscale, tempcoeffshape, *nSite, *nObs, usetempcov, *ntempcoeffloc,
		      *ntempcoeffscale, *ntempcoeffshape, trendlocs, trendscales, trendshapes);

    for (int i=0;i<*nSite;i++)
      for (int j=0;j<*nObs;j++)
	if (((scales[i] + trendscales[j]) <= 0) || ((shapes[i] + trendshapes[j]) <= -1)){
	  *dns = MINF;
	  return;
	}
  }

  else if (*dns != 0.0)
    return;

  //Stage 3: Transformation to unit Frechet
  if (flag)
    *dns = gev2frechTrend(data, *nObs, *nSite, locs, scales, shapes, trendlocs, trendscales,
			  trendshapes, jac, frech);

  else
    *dns = gev2frech(data, *nObs, *nSite, locs, scales, shapes, jac, frech);

  if (*dns != 0.0)
    return;

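  //Stage 4: Compute the (weighted) pairwise log-likelihood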
  if (*weighted)
    *dns = wlplikextremalt(frech, rho, *df, jac, *nObs, *nSite, weights);

  else
    *dns = lplikextremalt(frech, rho, *df, jac, *nObs, *nSite);

  //Stage 5: Removing the penalizing terms (if any)
  // 1- For the location parameter
  if (*locpenalty > 0)
    *dns -= penalization(locpenmat, loccoeff, *locpenalty, *nloccoeff, *npparloc);

  // 2- For the scale parameter
  if (*scalepenalty > 0)
    *dns -= penalization(scalepenmat, scalecoeff, *scalepenalty, *nscalecoeff, *npparscale);

  // 3- For the shape parameter
  if (*shapepenalty > 0)
    *dns -= penalization(shapepenmat, shapecoeff, *shapepenalty, *nshapecoeff, *npparshape);

  // 4- Doing the same thing for the temporal component
  if (*temppenaltyloc > 0)
    *dns -= penalization(temppenmatloc, tempcoeffloc, *temppenaltyloc, *ntempcoeffloc,
			 *nppartempcoeffloc);

  if (*temppenaltyscale > 0)
    *dns -= penalization(temppenmatscale, tempcoeffscale, *temppenaltyscale, *ntempcoeffscale,
			 *nppartempcoeffscale);

  if (*temppenaltyshape > 0)
    *dns -= penalization(temppenmatshape, tempcoeffshape, *temppenaltyshape, *ntempcoeffshape,
			 *nppartempcoeffshape);

  free(trendlocs); free(trendscales); free(trendshapes); free(jac); free(rho); free(locs);
  free(scales); free(shapes); free(frech);
  return;
}
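In the Stage 1 dispatch above, *covmod == 2 selects the Cauchy correlation family. As a rough guide to what that call fills into rho, here is a minimal sketch assuming the standard Cauchy parameterisation rho(h) = sill * (1 + (h/range)^2)^(-smooth); the function name and the penalty returned for invalid parameters are placeholders, not SpatialExtremes' exact code:

#include <math.h>

/* Sketch of a Cauchy correlation fill: dist holds the nPairs pairwise
   distances and rho receives the correlations. Returns 0 on success,
   or a positive penalty (placeholder magnitude) on invalid inputs. */
double cauchy_sketch(const double *dist, int nPairs, double nugget,
                     double sill, double range, double smooth,
                     double *rho) {
  if (range <= 0 || smooth <= 0 || nugget < 0 || sill <= 0)
    return 1e6;  /* placeholder penalty value */

  for (int i = 0; i < nPairs; i++)
    rho[i] = sill * pow(1.0 + (dist[i] / range) * (dist[i] / range),
                        -smooth);
  return 0.0;
}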
Code example #14
0
File: extremalt.c Project: cran/SpatialExtremes
void extremaltfull(int *covmod, double *data, double *dist, int *nSite, int *nObs,
		   int *dim, int *weighted, double *weights, double *locs, double *scales,
		   double *shapes, double *nugget, double *range, double *smooth, double *smooth2,
		   double *df, int *fitmarge, double *dns){
  //This is the extremal t model. It's a wrapper around several
  //sub-functions. It's named xxxfull as it either assumes that the
  //margins are unit Frechet, or estimates the GEV parameters at each
  //location.

  const int nPairs = *nSite * (*nSite - 1) / 2;

  //Some preliminary steps: Valid points?
  if (*fitmarge){
    for (int i=0;i<*nSite;i++){
      if ((scales[i] <= 0) || (shapes[i] <= -1)){
        *dns = MINF;
        return;
      }
    }
  }

  if (*df <= 0){
    *dns = (1 - *df) * (1 - *df) * MINF;
    return;
  }

  /*else if (*df >= 15){
    *dns = (*df - 14) * (*df - 14) * MINF;
    return;
    }*/

  if (*nugget >= 1){
    *dns = *nugget * *nugget * MINF;
    return;
  }

  // Allocate only after all parameter checks, so the early returns
  // above cannot leak
  double *jac = malloc(*nSite * *nObs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *frech = malloc(*nSite * *nObs * sizeof(double));

  //Stage 1: Compute the covariance at each location
  switch (*covmod){
  case 1:
    *dns = whittleMatern(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 2:
    *dns = cauchy(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 3:
    *dns = powerExp(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 4:
    *dns = bessel(dist, nPairs, *dim, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 5:
    *dns = caugen(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, *smooth2, rho);
    break;
  }

  if (*dns != 0.0)
    return;

  //Stage 2: Transformation to unit Frechet
  if (*fitmarge){
    *dns = gev2frech(data, *nObs, *nSite, locs, scales, shapes, jac, frech);

    if (*dns != 0.0)
      return;

    if (*weighted)
      *dns = wlplikextremalt(frech, rho, *df, jac, *nObs, *nSite, weights);

    else
      *dns = lplikextremalt(frech, rho, *df, jac, *nObs, *nSite);
  }

  else {
    for (int i=0;i<(*nSite * *nObs);i++)
      jac[i] = 0;

    if (*weighted)
      *dns = wlplikextremalt(data, rho, *df, jac, *nObs, *nSite, weights);

    else
      *dns = lplikextremalt(data, rho, *df, jac, *nObs, *nSite);
  }


  free(jac); free(rho); free(frech);
  return;
}
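Both wrappers follow R's .C() convention: every scalar travels as a pointer and the result comes back through *dns. A hypothetical stand-alone driver for extremaltfull, assuming it is linked against the rest of SpatialExtremes (locs/scales/shapes and weights are unread when *fitmarge == 0 and *weighted == 0, so NULL is passed):

#include <stdlib.h>

void extremaltfull(int *covmod, double *data, double *dist, int *nSite, int *nObs,
                   int *dim, int *weighted, double *weights, double *locs, double *scales,
                   double *shapes, double *nugget, double *range, double *smooth, double *smooth2,
                   double *df, int *fitmarge, double *dns);

int main(void) {
  int covmod = 2 /* Cauchy */, nSite = 3, nObs = 10, dim = 2,
      weighted = 0, fitmarge = 0;
  const int nPairs = nSite * (nSite - 1) / 2;
  double nugget = 0.1, range = 2.0, smooth = 1.0, smooth2 = 0.0,
         df = 3.0, dns = 0.0;
  double *data = calloc((size_t) (nSite * nObs), sizeof(double));
  double *dist = calloc((size_t) nPairs, sizeof(double));

  /* ... fill data (unit Frechet margins) and the pairwise distances ... */

  extremaltfull(&covmod, data, dist, &nSite, &nObs, &dim, &weighted,
                NULL, NULL, NULL, NULL, &nugget, &range, &smooth,
                &smooth2, &df, &fitmarge, &dns);

  free(data); free(dist);
  return dns != 0.0;
}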
Code example #15
0
File: cpoly.c Project: PDLPorters/pdl
int cpoly(double opr[], double opi[], int degree,
	   double zeror[], double zeroi[])
{
  /* Finds the zeros of a complex polynomial.

     opr, opi - double precision vectors of real and imaginary parts 
                of the coefficients in order of decreasing powers.
     degree   - integer degree of polynomial
     zeror, zeroi  
              - output double precision vectors of real and imaginary 
	        parts of the zeros.
     fail     - the return value: TRUE if the leading coefficient
                is zero, if cpoly has found fewer than degree zeros,
                or if there is another internal error.

     The program has been written to reduce the chance of overflow
     occurring.  If it does occur, there is still a possibility that
     the zerofinder will work provided the overflowed quantity is
     replaced by a large number. */
  
  double xx,yy,xxx,zr,zi,bnd;
  int fail,conv;
  int cnt1,cnt2,i,idnn2;
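  /* nn, pr, pi, shr, shi, sr, si, qpr and qpi are file-scope work
     variables shared with the helper routines (init, noshft, fxshft,
     cdivid, cmod, scale, cauchy) defined elsewhere in cpoly.c */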

  /* initialization of constants */
  nn = degree+1;
  if (!init(nn)) {
    fail = TRUE;
    return fail;
  }

  xx = .70710678L;
  yy = -xx;
  fail = FALSE;

  /* algorithm fails if the leading coefficient is zero. */
  if (opr[0] == 0.0 && opi[0] == 0.0) {
    fail = TRUE;
    return fail;
  }

  /* Remove the zeros at the origin if any */
  while (opr[nn-1] == 0.0 && opi[nn-1] == 0.0) {
    idnn2 = degree+1-nn;
    zeror[idnn2] = 0.0;
    zeroi[idnn2] = 0.0;
    nn--;
  }

  /* Make a copy of the coefficients */
  for (i=0;i<nn;i++) {
    pr[i] = opr[i];
    pi[i] = opi[i];
    shr[i] = cmod(pr[i],pi[i]);
  }

  /* Scale the polynomial */
  bnd = scale(nn,shr);
  if (bnd != 1.0) {
    for (i=0;i<nn;i++) {
      pr[i] *= bnd;
      pi[i] *= bnd;
    }
  }

  while (!fail) {

    /* Start the algorithm for one zero */
    if (nn < 3) {
      /* Calculate the final zero and return */
      cdivid(-pr[1],-pi[1],pr[0],pi[0],&(zeror[degree-1]),&(zeroi[degree-1]));
      return fail;
    }

    /* Calculate bnd, a lower bound on the modulus of the zeros */
    for (i=0;i<nn;i++) {
      shr[i] = cmod(pr[i],pi[i]);
    }
    bnd = cauchy(nn,shr,shi);

    /* Outer loop to control 2 major passes with different sequences
       of shifts */
    fail = TRUE;
    for(cnt1=1;fail && (cnt1<=2);cnt1++) {

      /* First stage calculation, no shift */
      noshft(5);

      /* Inner loop to select a shift. */
      for (cnt2=1;fail && (cnt2<10);cnt2++) {
	/* Shift is chosen with modulus bnd and amplitude rotated by
	   94 degrees from the previous shift */
	xxx = COSR*xx-SINR*yy;
	yy  = SINR*xx+COSR*yy;
	xx  = xxx;
	sr  = bnd*xx;
	si  = bnd*yy;

	/* Second stage calculation, fixed shift */
	conv = fxshft(10*cnt2,&zr,&zi);
	if (conv) {

	  /* The second stage jumps directly to the third stage iteration.
	     If successful, the zero is stored and the polynomial deflated */
	  idnn2 = degree+1-nn;
	  zeror[idnn2] = zr;
	  zeroi[idnn2] = zi;
	  nn--;
	  for(i=0;i<nn;i++) {
	    pr[i] = qpr[i];
	    pi[i] = qpi[i];
	  }
	  fail = FALSE;
	}
	/* If the iteration is unsuccessful another shift is chosen */
      }
      /* If 9 shifts fail, the outer loop is repeated with another
	 sequence of shifts */
    }
  }

  /* The zerofinder has failed on two major passes.
     Return empty handed */
  return fail;
}
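The call bnd = cauchy(nn,shr,shi) computes a lower bound on the moduli of the zeros from the coefficient moduli in shr, with shi as scratch. For reference, here is a sketch of the classic Jenkins-Traub CAUCHY routine this translation follows (names are ours): the bound is the positive root of the modulus polynomial with its constant term negated, found by interval chopping followed by Newton-Raphson.

#include <math.h>

/* pt[0..nn-1] holds the coefficient moduli, leading term first; q is
   scratch of the same length. Returns a lower bound on the moduli of
   the zeros: the positive root of |a_0|x^n + ... + |a_{n-1}|x - |a_n|. */
static double cauchy_bound(int nn, double pt[], double q[]) {
  int i, n = nn - 1;
  double x, xm, f, dx, df;

  pt[nn-1] = -pt[nn-1];                 /* negate the constant term */

  /* initial overestimate from the extreme coefficients */
  x = exp((log(-pt[nn-1]) - log(pt[0])) / (double) n);

  if (pt[n-1] != 0.0) {                 /* Newton step at the origin */
    xm = -pt[nn-1] / pt[n-1];
    if (xm < x) x = xm;
  }

  for (;;) {                            /* chop until the polynomial <= 0 */
    xm = x * 0.1;
    f = pt[0];
    for (i = 1; i < nn; i++) f = f * xm + pt[i];
    if (f <= 0.0) break;
    x = xm;
  }

  dx = x;                               /* Newton-Raphson to two decimals */
  while (fabs(dx / x) > 0.005) {
    q[0] = pt[0];
    for (i = 1; i < nn; i++) q[i] = q[i-1] * x + pt[i];
    f = q[nn-1];
    df = q[0];
    for (i = 1; i < n; i++) df = df * x + q[i];
    dx = f / df;
    x -= dx;
  }
  return x;
}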
Code example #16
0
File: cpoly.C Project: jakobkroeker/img
// Main function
//
int cpoly(int degree, const xcomplex poly[], xcomplex Roots[])
{
  xcomplex PhiDiff = -0.069756473 + 0.99756405i;
  xcomplex PhiRand = (1.0-1.0i) /sqrt(2.0);
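  // NB: the variable-length array declarations below rely on a
  // compiler extension (GCC/Clang); ISO C++ has no VLAs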
  xcomplex P[degree+1], H[degree+1], h[degree+1], p[degree+1], zero, s, bnd;
  unsigned int conv = 0;

  for (int i = 0; i <= degree; i++)
    if (isnan(poly[i]))
      return -1; // otherwise we may get stuck in infinite loops

  while(poly[0] == xdata.ZERO) {
    poly++;
    degree--;
    if (degree < 0)
      return -1;
  };

  int deg = degree;

  // Remove the zeros at the origin if any
  while(poly[deg] == xdata.ZERO){
    Roots[degree - deg] = xdata.ZERO;
    deg--;
  }

  if (deg == 0) return degree;
 
  // Make a copy of the coefficients
  for(int i = 0; i <= deg; i++) { P[i] = poly[i]; }

  scale(deg, P);

 search:

  if(deg <= 1){
    Roots[degree-1] = - P[1] / P[0];
    return degree;
  }

  // compute a bound of the moduli of the roots (Newton-Raphson)
  bnd = cauchy(deg, P);
     
  // Outer loop to control 2 Major passes with different sequences of shifts
  for(int cnt1 = 1; cnt1 <= 2; cnt1++) {
    // First stage calculation, no shift
    noshft(5, deg, P, H);
  
    // Inner loop to select a shift
    for(int cnt2 = 1; cnt2 <= 9; cnt2++) {
      // Shift is chosen with modulus bnd and amplitude rotated by 94 degrees from the previous shift
      PhiRand = PhiDiff * PhiRand;
      s = bnd * PhiRand;

      // Second stage calculation, fixed shift
      conv = fxshft(10 * cnt2, deg, P, p, H, h, &zero, &s);
      if(conv) {
	// The second stage jumps directly to the third stage iteration.
	// If successful, the zero is stored and the polynomial deflated
	Roots[degree - deg] = zero;

	// continue with the remaining polynomial
	deg--;
	for(int i = 0; i <= deg; i++){ P[i] = p[i]; };
	goto search;
      }
      // if the iteration is unsuccessful another shift is chosen
    }

    // if 9 shifts fail, the outer loop is repeated with another sequence of shifts
  }

  // The zerofinder has failed on two major passes
  // return empty handed with the number of roots found (less than the original degree)
  return degree - deg;
}
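A hypothetical driver for this version, assuming xcomplex can be treated as std::complex<double> (in the original project it is a custom extended-precision complex type). The return value, per the code above, is the number of roots written to Roots, or -1 on invalid input:

#include <complex>
#include <cstdio>

typedef std::complex<double> xcomplex;   // assumption for this sketch

int cpoly(int degree, const xcomplex poly[], xcomplex Roots[]);

int main() {
  // p(z) = z^2 - 1, coefficients in order of decreasing powers
  const xcomplex p[3] = { 1.0, 0.0, -1.0 };
  xcomplex roots[2];

  int found = cpoly(2, p, roots);
  for (int i = 0; i < found; i++)
    std::printf("root %d: %g %+gi\n", i, roots[i].real(), roots[i].imag());
  return 0;
}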