Example #1
double bessel(double x, int n){
  //nth term of a spherical Bessel function sequence, via upward recurrence
  if (n == 0 && x == 0) return 1;
  if (n != 0 && x == 0) return 0;
  if (n == 0) return sin(x)/x;
  if (n == 1) return sin(x)/(x*x) - cos(x)/x;
  return ((2*(n-1)+1)*bessel(x, n-1) - x*bessel(x, n-2))/x;
}
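For context, the recursion above is the upward three-term recurrence for the spherical Bessel functions of the first kind, j_n(x) = ((2n-1)/x) j_{n-1}(x) - j_{n-2}(x), seeded with j_0(x) = sin(x)/x and j_1(x) = sin(x)/x^2 - cos(x)/x. A minimal sanity check, assuming the bessel() above is linked into the same program (the test point 2.5 is arbitrary):

#include <math.h>
#include <stdio.h>

double bessel(double x, int n);   /* the function from Example #1 */

int main(void){
  double x = 2.5;   /* arbitrary test point */
  /* closed form: j2(x) = (3/x^2 - 1)*sin(x)/x - 3*cos(x)/x^2 */
  printf("recurrence:  %f\n", bessel(x, 2));
  printf("closed form: %f\n", (3.0/(x*x) - 1.0)*sin(x)/x - 3.0*cos(x)/(x*x));
  return 0;
}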
Example #2
double kaiser(int i,int n,double beta)
{
	/* Kaiser window function */
	double a,w,a2,b1,b2,beta1;
	b1=bessel(beta);
	a=2.0*i/(double)(n-1)-1.0;
	a2=a*a;
	beta1=beta*sqrt(1.0-a2);
	b2=bessel(beta1);
	w=b2/b1;
	return w;
}
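For reference, the function above is the standard Kaiser window, with bessel() standing in for the zeroth-order modified Bessel function I_0 (an assumption based on how it is used here):

w[i] = \frac{I_0\!\left(\beta\sqrt{1-a^2}\right)}{I_0(\beta)},
\qquad a = \frac{2i}{n-1} - 1,
\qquad i = 0,\dots,n-1.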
Example #3
double kaiserbessel (double x, double y, double M)
{
  double d = 1. - ((x*x + y*y) / (M * M));
  if (d <= 0.)
    return 0.;
  return bessel (M_PI * alpha * sqrt (d)) / bbeta;
}
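Assuming the globals alpha and bbeta are the ones set up by compute_bessel() in Example #12 (bbeta = bessel(M_PI * alpha)), the return value is the radially symmetric Kaiser-Bessel window on a disc of radius M:

w(x,y) = \frac{I_0\!\left(\pi\alpha\sqrt{1-\dfrac{x^2+y^2}{M^2}}\right)}{I_0(\pi\alpha)}
\quad\text{for } x^2+y^2 \le M^2,
\qquad w(x,y)=0 \text{ otherwise.}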
Example #4
int main(int argc, char **argv){

  int x = atoi(argv[1]);
  int N = atoi(argv[2]);

  int i;
  for(i=1; i<=N; i++)
  {
    double f1before, f, f1after;

    f = bessel(x, i);
    f1before = bessel(x, i-1);
    f1after = bessel(x, i+1);

    printf("\n");
    printf("n:%d \n", i);
    printf("When plugged into fn:  f1before:%f  fn:%f  f1after:%f \n", f1before, f, f1after);
    printf("Downward Recursion:    f1before:%f  fn:%f  f1after:%f \n", ((2.0*i+1)/x)*f - f1after, f, f1after);
    printf("Upward Recursion:      f1before:%f  fn:%f  f1after:%f \n\n", f1before, f, ((2.0*i+1)/x)*f - f1before);
  }

  return 0;
}
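Both printed checks rearrange the same three-term recurrence used in Example #1, f_{n-1}(x) + f_{n+1}(x) = ((2n+1)/x) f_n(x): solving for f_{n-1} gives the "Downward Recursion" line and solving for f_{n+1} gives the "Upward Recursion" line,

f_{n-1}(x) = \frac{2n+1}{x}\,f_n(x) - f_{n+1}(x),
\qquad
f_{n+1}(x) = \frac{2n+1}{x}\,f_n(x) - f_{n-1}(x).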
Example #5
/**
 * builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param type 0->cubic, 1->blackman nuttall windowed sinc, 2->kaiser windowed sinc beta=16
 */
void av_build_filter(int16_t *filter, double factor, int tap_count, int phase_count, int scale, int type){
    int ph, i, v;
    double x, y, w, tab[tap_count];
    const int center= (tap_count-1)/2;

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    for(ph=0;ph<phase_count;ph++) {
        double norm = 0;
        double e= 0;
        for(i=0;i<tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else        y = sin(x) / x;
            switch(type){
            case 0:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                       d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
            case 1:
                w = 2.0*x / (factor*tap_count) + M_PI;
                y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w) - 0.0106411 * cos(3*w);
                break;
            case 2:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(16*sqrt(FFMAX(1-w*w, 0)));
                break;
            }

            tab[i] = y;
            norm += y;
        }

        /* normalize so that a uniform color remains the same */
        for(i=0;i<tap_count;i++) {
            v = clip(lrintf(tab[i] * scale / norm + e), -32768, 32767);
            filter[ph * tap_count + i] = v;
            e += tab[i] * scale / norm - v;
        }
    }
}
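In the two windowed-sinc branches (type 1 and 2) each tap is a window applied to the shifted sinc, while the cubic branch (type 0) replaces the sinc by a Keys-style cubic kernel with derivative -0.5; the final loop then rescales every phase so that its coefficients sum to scale, with e carrying the rounding error forward. Schematically, for phase ph and tap i of the windowed-sinc case,

x = \pi f \left(i - c - \frac{ph}{P}\right),
\qquad
h_{ph}[i] \propto \frac{\sin x}{x}\, w(x),
\qquad
\sum_i h_{ph}[i] \approx \text{scale},

where f is the (clamped) resampling factor, c the center tap, P the number of phases, and w the selected window.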
Example #6
double geomCovariance(double *dist, int n, int dim, int covmod,
		      double sigma2, double sigma2Bound, double nugget,
		      double range, double smooth, double smooth2,
		      double *rho){

  //This function computes the geometric gaussian covariance function
  //between each pair of locations.
  //When ans != 0.0, the parameters are ill-defined.
  const double twiceSigma2 = 2 * sigma2;
  double ans = 0.0;

  if (sigma2 <= 0)
    return (1 - sigma2) * (1 - sigma2) * MINF;

  if (sigma2 > sigma2Bound)
    return (sigma2Bound - 1 - sigma2) * (sigma2Bound - 1 - sigma2) * MINF;

  switch (covmod){
  case 1:
    ans = whittleMatern(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 2:
    ans = cauchy(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 3:
    ans = powerExp(dist, n, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 4:
    ans = bessel(dist, n, dim, nugget, 1 - nugget, range, smooth, rho);
    break;
  case 5:
    ans = caugen(dist, n, nugget, 1 - nugget, range, smooth, smooth2, rho);
  }

  if (ans != 0.0)
    return ans;

  #pragma omp parallel for
  for (int i=0;i<n;i++)
    rho[i] = sqrt(twiceSigma2 * (1 - rho[i]));

  return ans;
}
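After the selected correlation model has filled rho with the correlations rho(h_i), the final loop overwrites each entry in place, so that on exit

\rho_i \leftarrow \sqrt{\,2\sigma^2\bigl(1-\rho(h_i)\bigr)\,},

which is the quantity the downstream geometric Gaussian routines are expected to consume in place of the raw correlation.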
Example #7
void extremaltfull(int *covmod, double *data, double *dist, int *nSite, int *nObs,
		   int *dim, int *weighted, double *weights, double *locs, double *scales,
		   double *shapes, double *nugget, double *range, double *smooth, double *smooth2,
		   double *df, int *fitmarge, double *dns){
  //This is the extremal t model. It's a wrapper around several
  //sub-functions. It's named xxxfull as it either assumes that the
  //margins are unit Frechet, or estimates the GEV parameters at
  //each location.

  const int nPairs = *nSite * (*nSite - 1) / 2;

  double *jac = malloc(*nSite * *nObs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *frech = malloc(*nSite * *nObs * sizeof(double));

  //Some preliminary steps: Valid points?
  if (*fitmarge){
    for (int i=0;i<*nSite;i++){
      if ((scales[i] <= 0) || (shapes[i] <= -1)){
	*dns = MINF;
	return;
      }
    }
  }

  if (*df <= 0){
    *dns = (1 - *df) * (1 - *df) * MINF;
    return;
  }

  /*else if (*df >= 15){
    *dns = (*df - 14) * (*df - 14) * MINF;
    return;
    }*/

  if (*nugget >= 1){
    *dns = *nugget * *nugget * MINF;
    return;
  }

  //Stage 1: Compute the covariance at each location
  switch (*covmod){
  case 1:
    *dns = whittleMatern(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 2:
    *dns = cauchy(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 3:
    *dns = powerExp(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 4:
    *dns = bessel(dist, nPairs, *dim, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 5:
    *dns = caugen(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, *smooth2, rho);
    break;
  }

  if (*dns != 0.0)
    return;

  //Stage 2: Transformation to unit Frechet
  if (*fitmarge){
    *dns = gev2frech(data, *nObs, *nSite, locs, scales, shapes, jac, frech);

    if (*dns != 0.0)
      return;

    if (*weighted)
      *dns = wlplikextremalt(frech, rho, *df, jac, *nObs, *nSite, weights);

    else
      *dns = lplikextremalt(frech, rho, *df, jac, *nObs, *nSite);
  }

  else {
    for (int i=0;i<(*nSite * *nObs);i++)
      jac[i] = 0;

    if (*weighted)
      *dns = wlplikextremalt(data, rho, *df, jac, *nObs, *nSite, weights);

    else
      *dns = lplikextremalt(data, rho, *df, jac, *nObs, *nSite);
  }


  free(jac); free(rho); free(frech);
  return;
}
Example #8
/**
 * builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param filter_type  filter type
 * @param kaiser_beta  kaiser window beta
 * @return 0 on success, negative on error
 */
static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale,
                        int filter_type, double kaiser_beta){
    int ph, i;
    int ph_nb = phase_count % 2 ? phase_count : phase_count / 2 + 1;
    double x, y, w, t, s;
    double *tab = av_malloc_array(tap_count+1,  sizeof(*tab));
    double *sin_lut = av_malloc_array(ph_nb, sizeof(*sin_lut));
    const int center= (tap_count-1)/2;
    int ret = AVERROR(ENOMEM);

    if (!tab || !sin_lut)
        goto fail;

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    if (factor == 1.0) {
        for (ph = 0; ph < ph_nb; ph++)
            sin_lut[ph] = sin(M_PI * ph / phase_count);
    }
    for(ph = 0; ph < ph_nb; ph++) {
        double norm = 0;
        s = sin_lut[ph];
        for(i=0;i<=tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else if (factor == 1.0)
                y = s / x;
            else
                y = sin(x) / x;
            switch(filter_type){
            case SWR_FILTER_TYPE_CUBIC:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                       d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
            case SWR_FILTER_TYPE_BLACKMAN_NUTTALL:
                w = 2.0*x / (factor*tap_count);
                t = -cos(w);
                y *= 0.3635819 - 0.4891775 * t + 0.1365995 * (2*t*t-1) - 0.0106411 * (4*t*t*t - 3*t);
                break;
            case SWR_FILTER_TYPE_KAISER:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0)));
                break;
            default:
                av_assert0(0);
            }

            tab[i] = y;
            s = -s;
            if (i < tap_count)
                norm += y;
        }

        /* normalize so that a uniform color remains the same */
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            for(i=0;i<tap_count;i++)
                ((int16_t*)filter)[ph * alloc + i] = av_clip_int16(lrintf(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int16_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clip_int16(lrintf(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_S32P:
            for(i=0;i<tap_count;i++)
                ((int32_t*)filter)[ph * alloc + i] = av_clipl_int32(llrint(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int32_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clipl_int32(llrint(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_FLTP:
            for(i=0;i<tap_count;i++)
                ((float*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((float*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        case AV_SAMPLE_FMT_DBLP:
            for(i=0;i<tap_count;i++)
                ((double*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((double*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        }
    }
#if 0
    {
#define LEN 1024
        int j,k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff=-2, minff=2, maxsf=-2, minsf=2;
        for(i=0; i<LEN; i++){
            double ss=0, sf=0, ff=0;
            for(j=0; j<LEN+tap_count; j++)
                sine[j]= cos(i*j*M_PI/LEN);
            for(j=0; j<LEN; j++){
                double sum=0;
                ph=0;
                for(k=0; k<tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k+j];
                filtered[j]= sum / (1<<FILTER_SHIFT);
                ss+= sine[j + center] * sine[j + center];
                ff+= filtered[j] * filtered[j];
                sf+= sine[j + center] * filtered[j];
            }
            ss= sqrt(2*ss/LEN);
            ff= sqrt(2*ff/LEN);
            sf= 2*sf/LEN;
            maxff= FFMAX(maxff, ff);
            minff= FFMIN(minff, ff);
            maxsf= FFMAX(maxsf, sf);
            minsf= FFMIN(minsf, sf);
            if(i%11==0){
                av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
                minff=minsf= 2;
                maxff=maxsf= -2;
            }
        }
    }
#endif

    ret = 0;
fail:
    av_free(tab);
    av_free(sin_lut);
    return ret;
}
Example #9
/**
 * Build a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16
 * @return 0 on success, negative on error
 */
static int build_filter(FELEM *filter, double factor, int tap_count, int phase_count, int scale, int type){
    int ph, i;
    double x, y, w;
    double *tab = av_malloc_array(tap_count, sizeof(*tab));
    const int center= (tap_count-1)/2;

    if (!tab)
        return AVERROR(ENOMEM);

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    for(ph=0;ph<phase_count;ph++) {
        double norm = 0;
        for(i=0;i<tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else        y = sin(x) / x;
            switch(type){
            case 0:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                       d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
            case 1:
                w = 2.0*x / (factor*tap_count) + M_PI;
                y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w) - 0.0106411 * cos(3*w);
                break;
            default:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(type*sqrt(FFMAX(1-w*w, 0)));
                break;
            }

            tab[i] = y;
            norm += y;
        }

        /* normalize so that a uniform color remains the same */
        for(i=0;i<tap_count;i++) {
#ifdef CONFIG_RESAMPLE_AUDIOPHILE_KIDDY_MODE
            filter[ph * tap_count + i] = tab[i] / norm;
#else
            filter[ph * tap_count + i] = av_clip(lrintf(tab[i] * scale / norm), FELEM_MIN, FELEM_MAX);
#endif
        }
    }
#if 0
    {
#define LEN 1024
        int j,k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff=-2, minff=2, maxsf=-2, minsf=2;
        for(i=0; i<LEN; i++){
            double ss=0, sf=0, ff=0;
            for(j=0; j<LEN+tap_count; j++)
                sine[j]= cos(i*j*M_PI/LEN);
            for(j=0; j<LEN; j++){
                double sum=0;
                ph=0;
                for(k=0; k<tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k+j];
                filtered[j]= sum / (1<<FILTER_SHIFT);
                ss+= sine[j + center] * sine[j + center];
                ff+= filtered[j] * filtered[j];
                sf+= sine[j + center] * filtered[j];
            }
            ss= sqrt(2*ss/LEN);
            ff= sqrt(2*ff/LEN);
            sf= 2*sf/LEN;
            maxff= FFMAX(maxff, ff);
            minff= FFMIN(minff, ff);
            maxsf= FFMAX(maxsf, sf);
            minsf= FFMIN(minsf, sf);
            if(i%11==0){
                av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
                minff=minsf= 2;
                maxff=maxsf= -2;
            }
        }
    }
#endif

    av_free(tab);
    return 0;
}
Example #10
/**
 * builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param filter_type  filter type
 * @param kaiser_beta  kaiser window beta
 * @return 0 on success, negative on error
 */
static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale,
                        int filter_type, int kaiser_beta){
    int ph, i;
    double x, y, w;
    double *tab = av_malloc(tap_count * sizeof(*tab));
    const int center= (tap_count-1)/2;

    if (!tab)
        return AVERROR(ENOMEM);

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    for(ph=0;ph<phase_count;ph++) {
        double norm = 0;
        for(i=0;i<tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else        y = sin(x) / x;
            switch(filter_type){
            case SWR_FILTER_TYPE_CUBIC:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                       d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
            case SWR_FILTER_TYPE_BLACKMAN_NUTTALL:
                w = 2.0*x / (factor*tap_count) + M_PI;
                y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w) - 0.0106411 * cos(3*w);
                break;
            case SWR_FILTER_TYPE_KAISER:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0)));
                break;
            default:
                av_assert0(0);
            }

            tab[i] = y;
            norm += y;
        }

        /* normalize so that a uniform color remains the same */
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            for(i=0;i<tap_count;i++)
                ((int16_t*)filter)[ph * alloc + i] = av_clip(lrintf(tab[i] * scale / norm), INT16_MIN, INT16_MAX);
            break;
        case AV_SAMPLE_FMT_S32P:
            for(i=0;i<tap_count;i++)
                ((int32_t*)filter)[ph * alloc + i] = av_clip(lrintf(tab[i] * scale / norm), INT32_MIN, INT32_MAX);
            break;
        case AV_SAMPLE_FMT_FLTP:
            for(i=0;i<tap_count;i++)
                ((float*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            break;
        case AV_SAMPLE_FMT_DBLP:
            for(i=0;i<tap_count;i++)
                ((double*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            break;
        }
    }
#if 0
    {
#define LEN 1024
        int j,k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff=-2, minff=2, maxsf=-2, minsf=2;
        for(i=0; i<LEN; i++){
            double ss=0, sf=0, ff=0;
            for(j=0; j<LEN+tap_count; j++)
                sine[j]= cos(i*j*M_PI/LEN);
            for(j=0; j<LEN; j++){
                double sum=0;
                ph=0;
                for(k=0; k<tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k+j];
                filtered[j]= sum / (1<<FILTER_SHIFT);
                ss+= sine[j + center] * sine[j + center];
                ff+= filtered[j] * filtered[j];
                sf+= sine[j + center] * filtered[j];
            }
            ss= sqrt(2*ss/LEN);
            ff= sqrt(2*ff/LEN);
            sf= 2*sf/LEN;
            maxff= FFMAX(maxff, ff);
            minff= FFMIN(minff, ff);
            maxsf= FFMAX(maxsf, sf);
            minsf= FFMIN(minsf, sf);
            if(i%11==0){
                av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
                minff=minsf= 2;
                maxff=maxsf= -2;
            }
        }
    }
#endif

    av_free(tab);
    return 0;
}
Example #11
void buildcovmat(int *nSite, int *grid, int *covmod, double *coord, int *dim,
		 double *nugget, double *sill, double *range,
		 double *smooth, double *covMat){

  int nPairs, effnSite = *nSite, zero = 0;
  const double one = 1, dzero = 0;
  double flag = 0;

  if (*grid)
    effnSite = R_pow_di(effnSite, *dim);

  nPairs = effnSite * (effnSite - 1) / 2;

  double *dist = malloc(nPairs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *coordGrid = malloc(effnSite * *dim * sizeof(double));

  if (*grid){
    //Coords specify a grid
    for (int i = 0; i < *nSite; i++)
      for (int j = 0; j < *nSite; j++){
	coordGrid[j + i * *nSite] = coord[i];
	coordGrid[*nSite * (*nSite + i) + j] = coord[j];
      }

    distance(coordGrid, dim, &effnSite, &zero, dist);
  }

  else
    //Coords don't specify a grid
    distance(coord, dim, nSite, &zero, dist);

  switch (*covmod){
  case 1:
    flag = whittleMatern(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 2:
    flag = cauchy(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 3:
    flag = powerExp(dist, nPairs, dzero, one, *range, *smooth, rho);
    break;
  case 4:
    flag = bessel(dist, nPairs, *dim, dzero, one, *range, *smooth, rho);
    break;
  case 6:
    if (*grid)
      flag = fbm(coordGrid, dist, *dim, effnSite, one, *range, *smooth, rho);

    else
      flag = fbm(coord, dist, *dim, effnSite, one, *range, *smooth, rho);

    break;
  }

  if (flag != 0.0)
    error("The covariance parameters seem to be ill-defined. Please check\n");

  //Fill the non-diagonal elements of the covariance matrix
  //#pragma omp parallel for
  for (int currentPair=0;currentPair<nPairs;currentPair++){
    int i = 0, j = 0;
    getSiteIndex(currentPair, effnSite, &i, &j);
    covMat[effnSite * i + j] = covMat[effnSite * j + i] = *sill * rho[currentPair];
  }

  //Fill the diagonal elements of the covariance matrix
  if (*covmod == 6){
    //Fractional brownian
    double irange2 = 1 / (*range * *range);

    if (*grid){
      for (int i = 0; i < effnSite;i++){
	covMat[i * (effnSite + 1)] = 0;

	for (int j= 0; j < *dim; j++)
	  covMat[i * (effnSite + 1)] += coordGrid[i + j * effnSite] * coordGrid[i + j * effnSite];

	covMat[i * (effnSite + 1)] = 2 * pow(covMat[i * (effnSite + 1)] * irange2, 0.5 * *smooth);
      }
    }

    else {
      for (int i = 0; i < effnSite; i++){
	covMat[i * (effnSite + 1)] = 0;

	for (int j = 0; j < *dim; j++)
	  covMat[i * (effnSite + 1)] += coord[i + j * effnSite] * coord[i + j * effnSite];

	covMat[i * (effnSite + 1)] = 2 * pow(covMat[i * (effnSite + 1)] * irange2, 0.5 * *smooth);
      }
    }
  }

  else
    for (int i = 0; i < effnSite; i++)
      covMat[i * (effnSite + 1)] = *sill + *nugget;


  free(dist); free(rho); free(coordGrid);
  return;
}
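Putting the two fill loops together: for the stationary models (covmod 1-4) the returned matrix is

\Sigma_{ij} = \text{sill}\;\rho(d_{ij}) \quad (i \ne j),
\qquad
\Sigma_{ii} = \text{sill} + \text{nugget},

with d_{ij} the pairwise distance and \rho the selected correlation function, while for the fractional Brownian case (covmod 6) the diagonal is instead 2\,(\lVert x_i \rVert / \text{range})^{\text{smooth}}, matching the pow(..., 0.5 * *smooth) expression in the code.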
Example #12
void compute_bessel ()
{
  bbeta = bessel (M_PI * alpha);
}
Example #13
int main(){
  int n;
  double x = 1.0; /* example argument; the original snippet leaves n and x undeclared */
  for (n = 0; n < 10; n++){
    printf("%lf %lf\n", x, bessel(x, n));
  }
  return 0;
}
Example #14
void extremaltdsgnmat(int *covmod, double *data, double *dist, int *nSite, int *nObs, int *dim,
		      int *weighted, double *weights, double *locdsgnmat, double *locpenmat,
		      int *nloccoeff, int *npparloc, double *locpenalty, double *scaledsgnmat,
		      double *scalepenmat, int *nscalecoeff, int *npparscale,
		      double *scalepenalty, double *shapedsgnmat, double *shapepenmat,
		      int *nshapecoeff, int *npparshape, double *shapepenalty, int *usetempcov,
		      double *tempdsgnmatloc, double *temppenmatloc, int *ntempcoeffloc,
		      int *nppartempcoeffloc, double *temppenaltyloc, double *tempdsgnmatscale,
		      double *temppenmatscale, int *ntempcoeffscale, int *nppartempcoeffscale,
		      double *temppenaltyscale, double *tempdsgnmatshape, double *temppenmatshape,
		      int *ntempcoeffshape, int *nppartempcoeffshape, double *temppenaltyshape,
		      double *loccoeff, double *scalecoeff, double *shapecoeff,
		      double *tempcoeffloc, double *tempcoeffscale, double *tempcoeffshape,
		      double *nugget, double *range, double *smooth, double *smooth2, double *df,
		      double *dns){
  //This is the extremal t model. It's named xxxdsgnmat as either linear
  //models or p-splines are used for the gev parameters.

  const int nPairs = *nSite * (*nSite - 1) / 2;
  int flag = usetempcov[0] + usetempcov[1] + usetempcov[2];

  double *trendlocs = malloc(*nObs * sizeof(double)),
    *trendscales = malloc(*nObs * sizeof(double)),
    *trendshapes = malloc(*nObs * sizeof(double)),
    *jac = malloc(*nSite * *nObs * sizeof(double)),
    *rho = malloc(nPairs * sizeof(double)),
    *locs = malloc(*nSite * sizeof(double)),
    *scales = malloc(*nSite * sizeof(double)),
    *shapes = malloc(*nSite * sizeof(double)),
    *frech = malloc(*nSite * *nObs * sizeof(double));

  if (*df <= 0){
    *dns = (1 - *df) * (1 - *df) * MINF;
    return;
  }

  /*else if (*df >= 15){
    *dns = (*df - 14) * (*df - 14) * MINF;
    return;
    }*/

  if (*nugget >= 1){
    *dns = *nugget * *nugget * MINF;
    return;
  }

  //Stage 1: Compute the covariance at each location
  switch (*covmod){
  case 1:
    *dns = whittleMatern(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 2:
    *dns = cauchy(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 3:
    *dns = powerExp(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 4:
    *dns = bessel(dist, nPairs, *dim, *nugget, 1 - *nugget, *range, *smooth, rho);
    break;
  case 5:
    *dns = caugen(dist, nPairs, *nugget, 1 - *nugget, *range, *smooth, *smooth2, rho);
    break;
  }

  if (*dns != 0)
    return;

  //Stage 2: Computing the GEV parameters using the design matrix
  *dns = dsgnmat2Param(locdsgnmat, scaledsgnmat, shapedsgnmat, loccoeff, scalecoeff, shapecoeff,
		       *nSite, *nloccoeff, *nscalecoeff, *nshapecoeff, locs, scales, shapes);

  if (flag){
    dsgnmat2temptrend(tempdsgnmatloc, tempdsgnmatscale, tempdsgnmatshape, tempcoeffloc,
		      tempcoeffscale, tempcoeffshape, *nSite, *nObs, usetempcov, *ntempcoeffloc,
		      *ntempcoeffscale, *ntempcoeffshape, trendlocs, trendscales, trendshapes);

    for (int i=0;i<*nSite;i++)
      for (int j=0;j<*nObs;j++)
	if (((scales[i] + trendscales[j]) <= 0) || ((shapes[i] + trendshapes[j]) <= -1)){
	  *dns = MINF;
	  return;
	}
  }

  else if (*dns != 0.0)
    return;

  //Stage 3: Transformation to unit Frechet
  if (flag)
    *dns = gev2frechTrend(data, *nObs, *nSite, locs, scales, shapes, trendlocs, trendscales,
			  trendshapes, jac, frech);

  else
    *dns = gev2frech(data, *nObs, *nSite, locs, scales, shapes, jac, frech);

  if (*dns != 0.0)
    return;

  if (*weighted)
    *dns = wlplikextremalt(frech, rho, *df, jac, *nObs, *nSite, weights);

  else
    *dns = lplikextremalt(frech, rho, *df, jac, *nObs, *nSite);

  //Stage 5: Removing the penalizing terms (if any)
  // 1- For the location parameter
  if (*locpenalty > 0)
    *dns -= penalization(locpenmat, loccoeff, *locpenalty, *nloccoeff, *npparloc);

  // 2- For the scale parameter
  if (*scalepenalty > 0)
    *dns -= penalization(scalepenmat, scalecoeff, *scalepenalty, *nscalecoeff, *npparscale);

  // 3- For the shape parameter
  if (*shapepenalty > 0)
    *dns -= penalization(shapepenmat, shapecoeff, *shapepenalty, *nshapecoeff, *npparshape);

  // 4- Doing the same thing for the temporal component
  if (*temppenaltyloc > 0)
    *dns -= penalization(temppenmatloc, tempcoeffloc, *temppenaltyloc, *ntempcoeffloc,
			 *nppartempcoeffloc);

  if (*temppenaltyscale > 0)
    *dns -= penalization(temppenmatscale, tempcoeffscale, *temppenaltyscale, *ntempcoeffscale,
			 *nppartempcoeffscale);

  if (*temppenaltyshape > 0)
    *dns -= penalization(temppenmatshape, tempcoeffshape, *temppenaltyshape, *ntempcoeffshape,
			 *nppartempcoeffshape);

  free(trendlocs); free(trendscales); free(trendshapes); free(jac); free(rho); free(locs);
  free(scales); free(shapes); free(frech);
  return;
}
Example #15
void latentgev(int *n, double *data, int *nSite, int *nObs, int *covmod,
	       int *dim, double *distMat, double *dsgnMat, int *nBeta,
	       double *beta, double *sills, double *ranges, double *smooths,
	       double *gevParams, double *hyperSill, double *hyperRange,
	       double *hyperSmooth, double *hyperBetaMean,
	       double *hyperBetaIcov, double *propGev, double *propRanges,
	       double *propSmooths, double *mcLoc, double *mcScale,
	       double *mcShape, double *accRates, double *extRates, int *thin,
	       int *burnin){


  int iter = 0, iterThin = 0, idxSite, idxSite2, idxMarge, idxBeta, info = 0,
    oneInt = 1, nSite2 = *nSite * *nSite,
    nPairs = *nSite * (*nSite + 1) / 2,
    *cumBeta = (int *) R_alloc(4, sizeof(int)),
    *cumBeta2 = (int *) R_alloc(3, sizeof(int)),
    *nBeta2 = (int *) R_alloc(3, sizeof(int)),
    lagLoc = nBeta[0] + 3 + *nSite, lagScale = nBeta[1] + 3 + *nSite,
    lagShape = nBeta[2] + 3 + *nSite;

  cumBeta[0] = 0;
  cumBeta[1] = nBeta[0];
  cumBeta[2] = nBeta[0] + nBeta[1];
  cumBeta[3] = cumBeta[2] + nBeta[2];
  cumBeta2[0] = 0;
  cumBeta2[1] = nBeta[0] * nBeta[0];
  cumBeta2[2] = nBeta[0] * nBeta[0] + nBeta[1] * nBeta[1];
  nBeta2[0] = nBeta[0] * nBeta[0];
  nBeta2[1] = nBeta[1] * nBeta[1];
  nBeta2[2] = nBeta[2] * nBeta[2];

  double one = 1.0, zero = 0.0, flag = 0.0, logDetProp,
    *logDet = (double *) R_alloc(3, sizeof(double)),
    *covMatChol = (double *) R_alloc(3 * nSite2, sizeof(double)),
    *GPmean = (double *) R_alloc(3 * *nSite, sizeof(double)),
    *resTop = (double *) R_alloc(*nSite, sizeof(double)),
    *resBottom = (double *) R_alloc(*nSite, sizeof(double)),
    *covariances = (double *) R_alloc(nPairs, sizeof(double)),
    *proposalGEV = (double *) R_alloc(3, sizeof(double)),
    *covMatPropChol = (double *) R_alloc(nSite2, sizeof(double));

  for (int i=3;i--;)
    logDet[i] = 0;

  for (int i=(3 * nSite2);i--;)
    covMatChol[i] = 0;

  for (int i=(3 * *nSite);i--;)
    GPmean[i] = 0;

  for (int i=nSite2;i--;)
    covMatPropChol[i] = 0;

  /*----------------------------------------------------*/
  /*                                                    */
  /*           Compute some initial objects             */
  /*                                                    */
  /*----------------------------------------------------*/

  // a. The inverse of the covariance matrices
  for (idxMarge=0;idxMarge<3;idxMarge++){

    switch(covmod[idxMarge]){
    case 1:
      flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			   smooths[idxMarge], covariances);
      break;
    case 2:
      flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		    smooths[idxMarge], covariances);
      break;
    case 3:
      flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
      break;
    case 4:
      flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		    smooths[idxMarge], covariances);
      break;
    }

    if (flag != 0)
      error("The starting values (covariance parameter) are ill-defined. Please check\n");

    /* We need to fill in the upper triangular part of covMatChol with
       covariances */
    {
      int current=-1;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	  current++;
	  covMatChol[idxSite + idxSite2 * *nSite + idxMarge * nSite2] = covariances[current];
	}
    }

    // Finally compute its Cholesky decomposition
    F77_CALL(dpotrf)("U", nSite, covMatChol + idxMarge * nSite2, nSite, &info);

    if (info != 0)
      error("Impossible to get the Cholesky decomp. from the starting values\n");

    /* Compute the log of the determinant of the proposal
       cov. mat. using the sum of the square of the diagonal elements of
       the Cholesky decomposition */
    for (idxSite2=0;idxSite2<*nSite;idxSite2++)
      logDet[idxMarge] += log(covMatChol[idxSite2 * (*nSite + 1) + idxMarge *
					 nSite2]);

    logDet[idxMarge] *= 2;
  }

  // b. The mean of the Gaussian processes
  for (idxMarge=0;idxMarge<3;idxMarge++)
    for (idxSite=0;idxSite<*nSite;idxSite++)
      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	GPmean[idxSite + idxMarge * *nSite] +=
	  dsgnMat[idxBeta * *nSite + idxSite + cumBeta[idxMarge] * *nSite] *
	  beta[cumBeta[idxMarge] + idxBeta];

  // c. Some constant related to the conjugate distributions
  double *conjMeanCst = (double *)R_alloc(cumBeta[3], sizeof(double));
  for(int i=cumBeta[3];i--;)
    conjMeanCst[i]=0;

  for (idxMarge=0;idxMarge<3;idxMarge++)
    F77_CALL(dsymv)("U", nBeta + idxMarge, &one, hyperBetaIcov +
		    cumBeta2[idxMarge], nBeta + idxMarge, hyperBetaMean +
		    cumBeta[idxMarge], &oneInt, &zero, conjMeanCst + cumBeta[idxMarge],
		    &oneInt);

  /*----------------------------------------------------*/
  /*                                                    */
  /*               Starting the MCMC algo               */
  /*                                                    */
  /*----------------------------------------------------*/

  GetRNGstate();
  while (iterThin<*n){

    /*----------------------------------------------------*/
    /*                                                    */
    /*           Updating the GEV parameters              */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxSite=0;idxSite<*nSite;idxSite++){
      for (idxMarge=0;idxMarge<3;idxMarge++){
	double logpropRatio = 0;
	proposalGEV[0] = gevParams[idxSite];
	proposalGEV[1] = gevParams[*nSite + idxSite];
	proposalGEV[2] = gevParams[2 * *nSite + idxSite];

	if (idxMarge==1){
	  proposalGEV[1] = rlnorm(log(gevParams[*nSite + idxSite]), propGev[1]);
	  logpropRatio = log(proposalGEV[1] / gevParams[*nSite + idxSite]);
	}

	else
	  proposalGEV[idxMarge] = rnorm(gevParams[idxMarge * *nSite + idxSite], propGev[idxMarge]);

	double topGEV = 0, bottomGEV = 0;
	gevlik(data + idxSite * *nObs, nObs, proposalGEV, proposalGEV + 1,
	       proposalGEV + 2, &topGEV);

	if (topGEV == -1e6){
	  extRates[idxMarge]++;
	  continue;
	}

	gevlik(data + idxSite * *nObs, nObs, gevParams + idxSite, gevParams +
	       *nSite + idxSite, gevParams + 2 * *nSite + idxSite, &bottomGEV);

	double topGP = 0, bottomGP = 0;
	for (idxSite2=0;idxSite2<*nSite;idxSite2++)
	  resBottom[idxSite2] = gevParams[idxSite2 + idxMarge * *nSite] -
	    GPmean[idxSite2 + idxMarge * *nSite];

	memcpy(resTop, resBottom, *nSite * sizeof(double));
	resTop[idxSite] = proposalGEV[idxMarge] - GPmean[idxSite + idxMarge *
							 *nSite];

	F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
			idxMarge * nSite2, nSite, resTop, nSite);
	F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
			idxMarge * nSite2, nSite, resBottom, nSite);

	for (idxSite2=0;idxSite2<*nSite;idxSite2++){
	  topGP += resTop[idxSite2] * resTop[idxSite2];
	  bottomGP += resBottom[idxSite2] * resBottom[idxSite2];
	}

	topGP *= -0.5;
	bottomGP *= -0.5;

	if (unif_rand() < exp(topGEV - bottomGEV + topGP - bottomGP +
			      logpropRatio)){
	  gevParams[idxSite + idxMarge * *nSite] = proposalGEV[idxMarge];
	  accRates[idxMarge]++;
	}
      }
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*        Updating the regression parameters          */
    /*                (conjugate prior)                   */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){

      /* conjCovMat is the covariance matrix for the conjugate
	 distribution i.e. MVN

	 conjCovMatChol is its Cholesky decomposition */
      double *dummy = malloc(*nSite * nBeta[idxMarge] * sizeof(double)),
	*conjCovMat = malloc(nBeta2[idxMarge] * sizeof(double)),
	*conjCovMatChol = malloc(nBeta2[idxMarge] * sizeof(double));

      memcpy(conjCovMat, hyperBetaIcov + cumBeta2[idxMarge],
	     nBeta2[idxMarge] * sizeof(double));
      memcpy(dummy, dsgnMat + *nSite * cumBeta[idxMarge],
	     *nSite * nBeta[idxMarge] * sizeof(double));

      // Compute dummy = covMatChol^(-T) %*% dsgnMat
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, nBeta + idxMarge, &one,
		      covMatChol + idxMarge * nSite2, nSite, dummy, nSite);

      /* Compute conjCovMat = dummy^T %*% dummy + conjCovMat

	 WARNING: Only the upper diagonal elements will be stored */
      F77_CALL(dsyrk)("U", "T", nBeta + idxMarge, nSite, &one, dummy, nSite,
		      &one, conjCovMat, nBeta + idxMarge);

      /* Rmk: The "real" conjugate cov. matrix is the inverse of
	 conjCovMat but it is not necessary to compute it */

      //Compute its Cholesky decomposition
      memcpy(conjCovMatChol, conjCovMat, nBeta2[idxMarge] * sizeof(double));
      F77_CALL(dpotrf)("U", nBeta + idxMarge, conjCovMatChol, nBeta + idxMarge,
		       &info);

      // Compute dummy2 = covMatChol^(-T) %*% (locs or scales or shapes)
      double *dummy2 = malloc(*nSite * sizeof(double));
      memcpy(dummy2, gevParams + idxMarge * *nSite, *nSite * sizeof(double));
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, dummy2, nSite);

      // conjMean is the mean for the conjugate distribution i.e. MVN
      // Set conjMean = conjMeanCst := hyperBetaIcov %*% hyperBetaMean
      double *conjMean = malloc(nBeta[idxMarge] * sizeof(double));
      memcpy(conjMean, conjMeanCst + cumBeta[idxMarge],
	     nBeta[idxMarge] * sizeof(double));

      // Compute conjMean = conjMean + dummy^T %*% dummy2 (dummy2 is a vector)
      F77_CALL(dgemv)("T", nSite, nBeta + idxMarge, &one, dummy, nSite, dummy2,
		      &oneInt, &one, conjMean, &oneInt);

      // Compute conjMean = conjCovMat^(-1) %*% conjMean
      F77_CALL(dposv)("U", nBeta + idxMarge, &oneInt, conjCovMat, nBeta +
		      idxMarge, conjMean, nBeta + idxMarge, &info);

      /* The new state is a realisation from the MVN(conjMean,
	 conjCovMat) so we simulate it from the Cholesky
	 decomposition */

      double *stdNormal = malloc(nBeta[idxMarge] * sizeof(double));
      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	stdNormal[idxBeta] = norm_rand();

      /* Rmk: Recall that conjCovMat is the precision matrix and *NOT*
	 the covariance matrix. Instead of using the Cholesky
	 decomposition of the conjugate covariance matrix (that we
	 still haven't computed), we use the inverse of the Cholesky
	 decomposition. This is different from the standard simulation
	 technique but completely equivalent since

	      iSigma = iSigma_*^T %*% iSigma_*
	 <==> Sigma := iSigma^(-1) = iSigma_*^(-1) %*% iSigma_*^(-T),

	 where iSigma_* is the Cholesky decomposition of iSigma.

	 Therefore we can use iSigma_*^(-1) for the simulation. */
      F77_CALL(dtrsm)("L", "U", "N", "N", nBeta + idxMarge, &oneInt,
		      &one, conjCovMatChol, nBeta + idxMarge, stdNormal,
		      nBeta + idxMarge);

      for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	beta[cumBeta[idxMarge] + idxBeta] = stdNormal[idxBeta] +
	  conjMean[idxBeta];

      //The last step is to update the mean of the GP
      for (idxSite=0;idxSite<*nSite;idxSite++){
	GPmean[idxSite + idxMarge * *nSite] = 0;

	for (idxBeta=0;idxBeta<nBeta[idxMarge];idxBeta++)
	  GPmean[idxSite + idxMarge * *nSite] += dsgnMat[idxBeta * *nSite + idxSite +
							 cumBeta[idxMarge] * *nSite] *
	    beta[cumBeta[idxMarge] + idxBeta];
      }

      free(dummy);
      free(conjCovMat);
      free(conjCovMatChol);
      free(dummy2);
      free(conjMean);
      free(stdNormal);
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*        Updating the sills (conjugate prior)        */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      for (idxSite=0;idxSite<*nSite;idxSite++)
	resTop[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
	  GPmean[idxSite + idxMarge * *nSite];

      // Compute resTop = covMatChol^(-T) %*% resTop
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resTop, nSite);

      double shape = 0.5 * *nSite + hyperSill[2 * idxMarge];
      double scale = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	scale += resTop[idxSite] * resTop[idxSite];

      scale = hyperSill[1 + 2 * idxMarge] + 0.5 * sills[idxMarge] * scale;

      /* Rmk: If Y ~ Gamma(shape = shape, rate = 1 / scale) then X :=
	 1 / Y \sim IGamma(shape = shape, scale = scale) */
      sills[idxMarge] = 1 / rgamma(shape,  1 / scale);

      // Now we need to update the covariance matrix and its inverse
      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			     smooths[idxMarge], covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			smooths[idxMarge], covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		      smooths[idxMarge], covariances);
	break;
      }

      /* We need to fill in the upper triangular part of covMatChol with
	 covariances */
      {
	int current=-1;
	for (idxSite=0;idxSite<*nSite;idxSite++)
	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	    current++;
	    covMatChol[idxSite + idxSite2 * *nSite + idxMarge * nSite2] = covariances[current];
	  }
      }

      // Cholesky decomposition of the covariance matrices
      F77_CALL(dpotrf)("U", nSite, covMatChol + idxMarge * nSite2, nSite,
		       &info);

      // Compute the log of the determinant of the proposal cov. mat.
      logDet[idxMarge] = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	logDet[idxMarge] += log(covMatChol[idxSite * (1 + *nSite) + idxMarge *
					   nSite2]);

      logDet[idxMarge] *= 2;
    }


    /*----------------------------------------------------*/
    /*                                                    */
    /*          Updating the ranges (M.-H. step)          */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      if (propRanges[idxMarge] == 0)
	continue;

      double rangeProp = rlnorm(log(ranges[idxMarge]), propRanges[idxMarge]),
	logpropRatio = log(rangeProp / ranges[idxMarge]);

      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], rangeProp,
			     smooths[idxMarge], covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], rangeProp,
		      smooths[idxMarge], covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], rangeProp,
			smooths[idxMarge], covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], rangeProp,
		      smooths[idxMarge], covariances);
	break;
      }

      if (flag != 0){
	extRates[3 + idxMarge]++;
	continue;
      }

      /* We need to fill in the upper triangular part of covMatPropChol
	 with covariances */
      {
	int current=-1;
	for (idxSite=0;idxSite<*nSite;idxSite++)
	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
	    current++;
	    covMatPropChol[idxSite + idxSite2 * *nSite] = covariances[current];
	  }
      }

      // Cholesky decomposition of the proposal cov. mat.
      F77_CALL(dpotrf)("U", nSite, covMatPropChol, nSite, &info);

      if (info != 0){
	extRates[3 + idxMarge]++;
	continue;
      }

      // Log of the determinant of the proposal cov. mat.
      logDetProp = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
	logDetProp += log(covMatPropChol[idxSite * (1 + *nSite)]);

      logDetProp *= 2;

      for (idxSite=0;idxSite<*nSite;idxSite++)
	resBottom[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
	  GPmean[idxSite + idxMarge * *nSite];

      memcpy(resTop, resBottom, *nSite * sizeof(double));

      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resBottom, nSite);
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatPropChol,
		      nSite, resTop, nSite);

      double top = logDetProp, bottom = logDet[idxMarge],
	logpriorRatio = (hyperRange[2 * idxMarge] - 1) *
	log(rangeProp / ranges[idxMarge]) + (ranges[idxMarge] - rangeProp) /
	hyperRange[2 * idxMarge + 1];

      for (idxSite=0;idxSite<*nSite;idxSite++){
	top += resTop[idxSite] * resTop[idxSite];
	bottom += resBottom[idxSite] * resBottom[idxSite];
      }

      top *= -0.5;
      bottom *= -0.5;

      if (unif_rand() < exp(top - bottom + logpriorRatio + logpropRatio)){
	ranges[idxMarge] = rangeProp;
	logDet[idxMarge] = logDetProp;
	memcpy(covMatChol + idxMarge * nSite2, covMatPropChol, nSite2 *
	       sizeof(double));
	accRates[3 + idxMarge]++;
      }
    }

    /*----------------------------------------------------*/
    /*                                                    */
    /*         Updating the smooths (M.-H. step)          */
    /*                                                    */
    /*----------------------------------------------------*/

    for (idxMarge=0;idxMarge<3;idxMarge++){
      if (propSmooths[idxMarge] == 0)
	continue;

      double smoothProp = rlnorm(log(smooths[idxMarge]), propSmooths[idxMarge]),
	logpropRatio = log(smoothProp / smooths[idxMarge]);

      switch(covmod[idxMarge]){
      case 1:
	flag = whittleMatern(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			     smoothProp, covariances);
	break;
      case 2:
	flag = cauchy(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
		      smoothProp, covariances);
	break;
      case 3:
	flag = powerExp(distMat, nPairs, zero, sills[idxMarge], ranges[idxMarge],
			smoothProp, covariances);
	break;
      case 4:
	flag = bessel(distMat, nPairs, *dim, zero, sills[idxMarge], ranges[idxMarge],
		      smoothProp, covariances);
	break;
      }

      if (flag != 0){
    	extRates[6 + idxMarge]++;
    	continue;
      }

      /* We need to fill in the upper triangular part of covMatPropChol
    	 with covariances */
      {
    	int current=-1;
    	for (idxSite=0;idxSite<*nSite;idxSite++)
    	  for (idxSite2=idxSite;idxSite2<*nSite;idxSite2++){
    	    current++;
    	    covMatPropChol[idxSite + idxSite2 * *nSite] = covariances[current];
    	  }
      }

      // Cholesky decomposition of the proposal cov. mat.
      F77_CALL(dpotrf)("U", nSite, covMatPropChol, nSite, &info);

      if (info != 0){
    	extRates[6 + idxMarge]++;
    	continue;
      }

      // Log of the determinant of the proposal cov. mat.
      logDetProp = 0;
      for (idxSite=0;idxSite<*nSite;idxSite++)
    	logDetProp += log(covMatPropChol[idxSite * (1 + *nSite)]);

      logDetProp *= 2;

      for (idxSite=0;idxSite<*nSite;idxSite++)
    	resBottom[idxSite] = gevParams[idxSite + idxMarge * *nSite] -
    	  GPmean[idxSite + idxMarge * *nSite];

      memcpy(resTop, resBottom, *nSite * sizeof(double));

      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatPropChol,
		      nSite, resTop, nSite);
      F77_CALL(dtrsm)("L", "U", "T", "N", nSite, &oneInt, &one, covMatChol +
		      idxMarge * nSite2, nSite, resBottom, nSite);

      double top = logDetProp, bottom = logDet[idxMarge],
    	logpriorRatio = (hyperSmooth[2 * idxMarge] - 1) *
    	log(smoothProp / smooths[idxMarge]) + (smooths[idxMarge] - smoothProp) /
    	hyperSmooth[2 * idxMarge + 1];

      for (idxSite=0;idxSite<*nSite;idxSite++){
    	top += resTop[idxSite] * resTop[idxSite];
    	bottom += resBottom[idxSite] * resBottom[idxSite];
      }

      top *= -0.5;
      bottom *= -0.5;

      if (unif_rand() < exp(top - bottom + logpriorRatio + logpropRatio)){
    	smooths[idxMarge] = smoothProp;
    	logDet[idxMarge] = logDetProp;
    	memcpy(covMatChol + idxMarge * nSite2, covMatPropChol, nSite2 *
    	       sizeof(double));
    	accRates[6 + idxMarge]++;
      }
    }

    iter++;

    //Need to store the new state into the mc object.
    if ((iter > *burnin) && ((iter % *thin) == 0)){
      mcLoc[nBeta[0] + iterThin * lagLoc] = sills[0];
      mcLoc[nBeta[0] + 1 + iterThin * lagLoc] = ranges[0];
      mcLoc[nBeta[0] + 2 + iterThin * lagLoc] = smooths[0];

      mcScale[nBeta[1] + iterThin * lagScale] = sills[1];
      mcScale[nBeta[1] + 1 + iterThin * lagScale] = ranges[1];
      mcScale[nBeta[1] + 2 + iterThin * lagScale] = smooths[1];

      mcShape[nBeta[2] + iterThin * lagShape] = sills[2];
      mcShape[nBeta[2] + 1 + iterThin * lagShape] = ranges[2];
      mcShape[nBeta[2] + 2 + iterThin * lagShape] = smooths[2];

      for (idxBeta=0;idxBeta<nBeta[0];idxBeta++)
	mcLoc[idxBeta + iterThin * lagLoc] = beta[idxBeta];

      for (idxBeta=0;idxBeta<nBeta[1];idxBeta++)
	mcScale[idxBeta + iterThin * lagScale] = beta[cumBeta[1] + idxBeta];

      for (idxBeta=0;idxBeta<nBeta[2];idxBeta++)
	mcShape[idxBeta + iterThin * lagShape] = beta[cumBeta[2] + idxBeta];

      for (idxSite=0;idxSite<*nSite;idxSite++){
	mcLoc[nBeta[0] + 3 + idxSite + iterThin * lagLoc] = gevParams[idxSite];
	mcScale[nBeta[1] + 3 + idxSite + iterThin * lagScale] = gevParams[*nSite + idxSite];
	mcShape[nBeta[2] + 3 + idxSite + iterThin * lagShape] = gevParams[2 * *nSite + idxSite];
      }
      iterThin++;
    }
  }
  PutRNGstate();

  for (int i=0;i<9;i++){
    accRates[i] /= (double) iter;
    extRates[i] /= (double) iter;
  }

  return;
}
Example #16
void rextremaltcirc(int *nObs, int *ngrid, double *steps, int *dim,
		    int *covmod, double *nugget, double *range,
		    double *smooth, double *DoF, double *uBound, double *ans){
  /* This function generates random fields from the extremal-t model

     nObs: the number of observations to be generated
    ngrid: the number of locations along one axis
      dim: the random field is generated in R^dim
   covmod: the covariance model
   nugget: the nugget parameter
    range: the range parameter
   smooth: the smooth parameter
      DoF: the degrees of freedom
   uBound: the upper bound for the stopping rule (see rextremalttbm)
      ans: the generated random field */

  int i, j, k = -1, nbar = R_pow_di(*ngrid, *dim), r, m;
  const double zero = 0;
  double *rho, *irho, sill = 1 - *nugget;
    //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    double *dist = (double *)R_alloc(mbar, sizeof(double));

    notPosDef = 0;
    //Computation of the distance
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));
    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check if the circulant embedding
       matrix is positive definite */

    /* Note: the next lines are only valid for 2-d random fields and
       would need to change if m_1 != m_2, as we assume that
       m_1 = m_2 = m */
    int maxf, maxp, *iwork;
    double *work;

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] <= 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embbed the covariance matrix");
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = malloc(mbar * sizeof(double)),
    *ia = malloc(mbar * sizeof(double)),
    *gp = malloc(nbar * sizeof(double));

  GetRNGstate();
  for (int i=*nObs;i--;){
    int nKO = nbar;
    double poisson = 0;

    while (nKO){
      poisson += exp_rand();
      double ipoisson = 1 / poisson,
	thresh = *uBound * ipoisson;
      
      /* We simulate one realisation of a gaussian random field with
	 the required covariance function */
      circcore(rho, a, ia, m, halfM, mdag, mdagbar, *ngrid, nbar, isqrtMbar, *nugget, gp);
      
      nKO = nbar;
      for (int j=nbar;j--;){
	double dummy = R_pow(fmax2(gp[j], 0), *DoF) * ipoisson;
	ans[j + i * nbar] = fmax2(dummy, ans[j + i * nbar]);
	nKO -= (thresh <= ans[j + i * nbar]);
      }
    }
  }
  
  PutRNGstate();
  
  //Lastly we multiply by the normalizing constant
  const double imean = M_SQRT_PI * R_pow(2, -0.5 * (*DoF - 2)) /
    gammafn(0.5 * (*DoF + 1));
  for (i=(nbar * *nObs);i--;)
    ans[i] *= imean;
  
  free(a); free(ia); free(gp);
  return;
}
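The embedding stage relies on the standard circulant-embedding property that the eigenvalues of a (block-)circulant matrix are given by the discrete Fourier transform of its first row, which is what the two fft_work passes compute on the m x m torus; the candidate size m is accepted only when all eigenvalues are positive with negligible imaginary part. In one dimension this reads

\lambda_k = \sum_{j=0}^{m-1} c_j\, e^{-2\pi \mathrm{i}\, jk/m},
\qquad k = 0,\dots,m-1,

and the two-dimensional case used here simply applies the transform along each axis in turn.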
Example #17
void circemb(int *nsim, int *ngrid, double *steps, int *dim, int *covmod,
	     double *nugget, double *sill, double *range, double *smooth,
	     double *ans){

  int i, j, k = -1, r, nbar = *ngrid * *ngrid, m;
  //irho is the imaginary part of the covariance -> 0
  double *rho, *irho;
  const double zero = 0;
  //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    double *dist = malloc(mbar * sizeof(double));

    notPosDef = 0;
    //Computation of the distance
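    //Indices past m/2 are wrapped to negative lags so that dist[r] is the
    //distance on the torus between grid node r and the origin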
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));

    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, *sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, *sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check whether the circulant
       embedding matrix is positive definite */

    /* Note: the next lines are only valid for 2-d random fields and
       assume m_1 = m_2 = m; they would need to change if
       m_1 \neq m_2 */
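    /* The two fft_work passes below perform a 2-d FFT of the m x m array
       (first along one dimension, then along the other); on exit rho holds
       the eigenvalues of the circulant embedding matrix and irho should be
       numerically zero */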
    int maxf, maxp, *iwork;
    double *work;

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] < 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embed the covariance matrix");

    free(dist);
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = malloc(mbar * sizeof(double)),
    *ia = malloc(mbar * sizeof(double));
    
  GetRNGstate();
  for (k=*nsim;k--;){
    
    /* ---------- Simulation from \Lambda^1/2 Q* Z ------------ */
    for (r=mdagbar;r--;){
      /* Below is the procedure 5.2.4 in Wood and Chan */

      //Computation of the cardinality of A(j)
      int j1, j2,i = r % mdag, j = r / mdag;
      double u, v;

      int card = (i != 0) * (i != halfM) + 2 * (j != 0) * (j != halfM);
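      /* card = 1 if only i has a distinct mirror index m - i, 2 if only j
	 does, 3 if both do and 0 if neither; self-mirrored frequencies get a
	 purely real coefficient (case 0) while the others are filled in
	 conjugate pairs so that the transformed field is real */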
      
      switch (card){
      case 3:
	//B(1) = {1}, B^c(1) = {2}
	j1 = (m - i) + m * j;
	j2 = i + m * (m - j);
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	
	//B(2) = {1,2}, B^c(2) = {0}
	j1 = (m - i) + m * (m - j);
	j2 = i + m * j;
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1]*= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2]*= u;
	ia[j2] *= -v;      
	break;
      case 1:
	//B(1) = 0, B^c(1) = {1}
	j1 = i + m * j;
	j2 = m - i + m * j;
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	break;
      case 2:
	//B(1) = 0, B^c(1) = {2}
	j1 = i + m * j;
	j2 = i + m * (m - j);
	u = norm_rand();
	v = norm_rand();
	a[j1] = ia[j1] = M_SQRT1_2 * rho[j1];
	a[j1] *= u;
	ia[j1] *= v;
	a[j2] = ia[j2] = M_SQRT1_2 * rho[j2];
	a[j2] *= u;
	ia[j2] *= -v;
	break;
      case 0:
	j1 = i + m * j;
	a[j1] = rho[j1] * norm_rand();
	ia[j1] = 0;
	break;      
      }
    }

    /* ---------- Computation of Q \Lambda^1/2 Q* Z ------------ */
    int maxf, maxp, *iwork;
    double *work;
    
    /* The next lines are only valid for 2-d random fields and assume
       m_1 = m_2 = m; they would need to change if m_1 \neq m_2 */
    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(a, ia, m, m, 1, -1, work, iwork);
    
    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(a, ia, 1, m, m, -1, work, iwork);
        
    for (i=nbar;i--;)
      ans[i + k * nbar] = isqrtMbar * a[i % *ngrid + m * (i / *ngrid)];
  }
  PutRNGstate();  
    
  if (*nugget > 0){
    int dummy = *nsim * nbar;
    double sqrtNugget = sqrt(*nugget);
    
    GetRNGstate();
    for (i=dummy;i--;)
      ans[i] += sqrtNugget * norm_rand();

    PutRNGstate();
  }

  free(a); free(ia);

  return;
}
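For readers new to the Wood and Chan construction, here is a deliberately minimal 1-d sketch of the same idea. It is my own illustration, not the package code: it uses an exponential covariance, a plain O(m^2) DFT instead of fft_work, crude rand()-based normals, and arbitrary values for the grid size, embedding size, step and range.

#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static double unif(void){ return (rand() + 1.0) / (RAND_MAX + 2.0); }
static double gauss(void){                 /* Box-Muller */
  return sqrt(-2 * log(unif())) * cos(2 * acos(-1.0) * unif());
}

int main(void){
  const double pi = acos(-1.0);
  enum { n = 50, m = 128 };                /* grid size and embedding size >= 2*(n-1) */
  double step = 0.1, range = 1.0;
  double c[m], lambda[m];
  double complex a[m];

  for (int j = 0; j < m; j++){             /* first row of the circulant matrix */
    int jj = (j > m / 2) ? j - m : j;      /* wrapped (torus) lag */
    c[j] = exp(-fabs(jj * step) / range);
  }

  for (int k = 0; k < m; k++){             /* eigenvalues = DFT of the first row */
    double complex s = 0;
    for (int j = 0; j < m; j++)
      s += c[j] * cexp(-2 * pi * I * j * k / m);
    lambda[k] = creal(s);
    if (lambda[k] < 0){ printf("not positive definite, increase m\n"); return 1; }
  }

  srand(1);
  for (int k = 0; k < m; k++)              /* colour complex white noise */
    a[k] = sqrt(lambda[k]) * (gauss() + I * gauss());

  for (int j = 0; j < n; j++){             /* one realisation = Re of the inverse DFT */
    double complex s = 0;
    for (int k = 0; k < m; k++)
      s += a[k] * cexp(2 * pi * I * j * k / m);
    printf("%f\n", creal(s) / sqrt(m));
  }
  return 0;
}

The real part of the back-transformed vector has exactly the target covariance; the package does the same thing in two dimensions with fft_work and the pairing logic of procedure 5.2.4 above.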
Exemplo n.º 18
0
void rgeomcirc(int *nObs, int *ngrid, double *steps, int *dim,
	       int *covmod, double *sigma2, double *nugget, double *range,
	       double *smooth, double *uBound, double *ans){
  /* This function generates random fields from the geometric model

     nObs: the number of observations to be generated
    ngrid: the number of locations along one axis
    steps: the grid increments along each axis
      dim: the random field is generated in R^dim
   covmod: the covariance model
   sigma2: the variance of the underlying Gaussian process
   nugget: the nugget parameter
    range: the range parameter
   smooth: the smooth parameter
   uBound: the uniform upper bound for the stoch. proc.
      ans: the generated random field */

  int i, j, k = -1, nbar = R_pow_di(*ngrid, *dim), r, m;
  const double loguBound = log(*uBound), halfSigma2 = 0.5 * *sigma2,
    zero = 0;
  double sigma = sqrt(*sigma2), sill = 1 - *nugget, *rho, *irho, *dist;

  //Below is a table of highly composite numbers
  int HCN[39] = {1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240,
		 360, 720, 840, 1260, 1680, 2520, 5040, 7560,
		 10080, 15120, 20160, 25200, 27720, 45360, 50400,
		 55440, 83160, 110880, 166320, 221760, 277200,
		 332640, 498960, 554400, 665280, 720720, 1081080};

    
  /* Find the smallest size m for the circulant embedding matrix */
  {
    int dummy = 2 * (*ngrid - 1);
    do {
      k++;
      m = HCN[k];
    } while (m < dummy);
  }
  
  /* ---------- beginning of the embedding stage ---------- */
  int mbar = m * m, halfM = m / 2, notPosDef = 0;
  do {
    dist = (double *)R_alloc(mbar, sizeof(double));

    notPosDef = 0;
    //Computation of the distance
    for (r=mbar;r--;){
      i = r % m;
      j = r / m;
      
      if (i > halfM)
	i -= m;
      
      if (j > halfM)
	j -= m;
      
      dist[r] = hypot(steps[0] * i, steps[1] * j);
    }

    //Computations of the covariances
    rho = (double *)R_alloc(mbar, sizeof(double));
    irho = (double *)R_alloc(mbar, sizeof(double));
    for (i=mbar;i--;)
      irho[i] = 0;

    switch (*covmod){
    case 1:
      whittleMatern(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 2:
      cauchy(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 3:
      powerExp(dist, mbar, zero, sill, *range, *smooth, rho);
      break;
    case 4:
      bessel(dist, mbar, *dim, zero, sill, *range, *smooth, rho);
      break;
    }

    /* Compute the eigenvalues to check whether the circulant
       embedding matrix is positive definite */

    /* Note: the next lines are only valid for 2-d random fields and
       assume m_1 = m_2 = m; they would need to change if
       m_1 \neq m_2 */
    int maxf, maxp;

    fft_factor(m, &maxf, &maxp);
    double *work = (double *)R_alloc(4 * maxf, sizeof(double));
    int *iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, m, m, 1, -1, work, iwork);

    fft_factor(m, &maxf, &maxp);
    work = (double *)R_alloc(4 * maxf, sizeof(double));
    iwork = (int *)R_alloc(maxp, sizeof(int));
    fft_work(rho, irho, 1, m, m, -1, work, iwork);

    //Check if the eigenvalues are all positive
    for (i=mbar;i--;){
      notPosDef |= (rho[i] <= 0) || (fabs(irho[i]) > 0.001);
    }

    if (notPosDef){
      k++;
      m = HCN[k];
      halfM = m / 2;
      mbar = m * m;
    }

    if (k > 30)
      error("Impossible to embed the covariance matrix");
    
  } while (notPosDef);
  /* --------- end of the embedding stage --------- */

  /* Computation of the square root of the eigenvalues */
  for (i=mbar;i--;){
    rho[i] = sqrt(rho[i]);
    irho[i] = 0;//No imaginary part
  }

  int mdag = m / 2 + 1, mdagbar = mdag * mdag;
  double isqrtMbar = 1 / sqrt(mbar);

  double *a = (double *)R_alloc(mbar, sizeof(double));
  double *ia = (double *)R_alloc(mbar, sizeof(double));
  //gp is allocated once here rather than inside the while loop below since
  //R_alloc'd memory is only reclaimed when the call returns
  double *gp = (double *)R_alloc(nbar, sizeof(double));
  
  GetRNGstate();
  for (i=*nObs;i--;){
    int nKO = nbar;
    double poisson = 0;
    
    while (nKO) {
      /* The stopping rule is reached when nKO = 0 i.e. when each site
	 satisfies the condition in Eq. (8) of Schlather (2002) */
      int j;
      
      poisson += exp_rand();
      double ipoisson = -log(poisson), thresh = loguBound + ipoisson;
      
      /* We simulate one realisation of a gaussian random field with
	 the required covariance function */
      circcore(rho, a, ia, m, halfM, mdag, mdagbar, *ngrid, nbar, isqrtMbar, *nugget, gp);
      
      nKO = nbar;
      double ipoissonMinusHalfSigma2 = ipoisson - halfSigma2;
      for (j=nbar;j--;){
	ans[j + i * nbar] = fmax2(sigma * gp[j] + ipoissonMinusHalfSigma2,
				  ans[j + i * nbar]);
	nKO -= (thresh <= ans[j + i * nbar]);
	
      }
    }
  }
  
  PutRNGstate();

  /* So far we have generated a max-stable process with standard Gumbel
     margins; switch to unit Frechet ones */
  for (i=*nObs * nbar;i--;)
    ans[i] = exp(ans[i]);
  
  return;
}
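The stopping rule is easier to see at a single site. The toy program below is an illustration with my own simplified RNG, not the package code: it builds the same maximum max_k { -log zeta_k + sigma * Z_k - sigma^2/2 } over Poisson points zeta_k and stops once loguBound - log zeta_k falls below the running maximum, which is the thresh test above. As in the package, uBound only approximately bounds the (unbounded) log-normal spectral functions; the values of sigma, uBound and nObs are arbitrary.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static double unif(void){ return (rand() + 1.0) / (RAND_MAX + 2.0); }
static double gauss(void){                    /* Box-Muller */
  return sqrt(-2 * log(unif())) * cos(2 * acos(-1.0) * unif());
}

int main(void){
  double sigma = 1.0, sigma2 = 1.0, uBound = 50.0, loguBound = log(uBound);
  int nObs = 100000;
  double sum = 0;

  srand(1);
  for (int i = 0; i < nObs; i++){
    double poisson = 0, ans = -INFINITY;
    for (;;){
      poisson += -log(unif());                /* next Poisson arrival */
      double ipoisson = -log(poisson);
      double val = ipoisson + sigma * gauss() - 0.5 * sigma2;
      if (val > ans) ans = val;
      if (loguBound + ipoisson <= ans) break; /* no later atom can win */
    }
    sum += ans;
  }
  /* On the Gumbel scale the mean should be close to Euler's constant 0.5772 */
  printf("empirical mean: %f\n", sum / nObs);
  return 0;
}

Exponentiating ans, as done at the end of rgeomcirc, turns these standard Gumbel values into unit Frechet ones.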