int main(int argc, char* argv[]) { int nt, nx, ntx, n2, n3, next; float v0, v1, dt, dx, t0, kur; float **data; sf_file inp, out; sf_init(argc,argv); inp = sf_input("in"); out = sf_output("out"); if (SF_FLOAT != sf_gettype(inp)) sf_error("Need float input"); if (!sf_histint(inp,"n1",&nt)) sf_error("No n1= in input"); if (!sf_histfloat(inp,"d1",&dt)) sf_error("No d1= in input"); if (!sf_histfloat(inp,"o1",&t0)) t0=0.; if (!sf_histint(inp,"n2",&nx)) sf_error("No n2= in input"); if (!sf_histfloat(inp,"d2",&dx)) sf_error("No d2= in input"); ntx = nt*nx; if (!sf_getfloat("v0",&v0)) v0=SF_EPS; /* initial velocity */ if (!sf_getfloat("v",&v1)) sf_error("Need v="); /* final velocity */ if (!sf_getint("pad",&n2)) n2=nt; /* padding for stretch */ if (!sf_getint("pad2",&n3)) n3=2*kiss_fft_next_fast_size((n2+1)/2); /* padding for FFT */ if (!sf_getint("extend",&next)) next=4; /* trace extension */ velcon_init(nt,nx,dt,dx,t0,n2,n3,next); data = sf_floatalloc2(nt,nx); sf_floatread(data[0],ntx,inp); kur = kurtosis(ntx,data[0]); sf_warning("kurtosis before: %g",kur); velcon(data,v0,v1); kur = kurtosis(ntx,data[0]); sf_warning("kurtosis after: %g",kur); sf_floatwrite(data[0],ntx,out); exit(0); }
/* Print a human-readable summary of the accumulated statistics to `str`:
   the raw power sums s1..s_order, the first four central moments, and the
   derived average / stddev / skew / kurtosis.  Output format is unchanged.
   NOTE(review): `throw()` is kept to match the (unseen) header declaration;
   it is deprecated in modern C++ but cannot be changed here alone. */
void PowerSum::dump(std::ostream& str) const throw()
{
    str << "n:" << n;
    for (int k = 1; k <= order; ++k) {
        str << " s" << k << ":" << s[k];
    }
    str << std::endl;

    str << "m1:" << moment(1)
        << " m2:" << moment(2)
        << " m3:" << moment(3)
        << " m4:" << moment(4) << std::endl;

    str << "average:" << average()
        << " stddev:" << sqrt(variance())
        << " skew:" << skew()
        << " kurtosis:" << kurtosis() << std::endl;
}
/*!
 * Objective function for cnls-like systems of n coupled waves, where the
 * best solution is for each wave to collapse into a single pulse.  The score
 * is the energy of the combined envelope divided by the kurtosis of the
 * magnitude of its spectrum, optimizing for energetic yet stable pulses.
 *
 * This straightforward formulation is easier to follow than the optimal
 * one; this code is never a bottleneck, so clarity wins.
 */
double n_pulse_score::score(comp* ucur){
    // Fold the n_pulse coupled waves into one real amplitude envelope.
    for(size_t t = 0; t < nts; t++){
        help[t] = _sqabs(ucur[t]);
        for(size_t p = 1; p < n_pulse; p++){
            help[t] += _sqabs(ucur[t + p*nts]);
        }
        help[t] = sqrt(help[t]);
        kurtosis_help[t] = help[t];
    }

    double pulse_energy = energy(help, nts);

    // Kurtosis is evaluated on the magnitude of the envelope's spectrum.
    fft(kurtosis_help, kurtosis_help, nts);
    for(size_t t = 0; t < nts; t++){
        help[t] = abs(kurtosis_help[t]);
    }

    double inv_kurt = 1.0/(kurtosis(help, nts));
    return inv_kurt * pulse_energy;
}
/* contrast:
 * Global contrast measure of the image's intensity plane: computes the
 * population standard deviation and kurtosis of all pixel intensities and
 * returns std / (k / std^4)^(1/4); returns 0 for a (near-)flat image.
 *
 * Fixes: memory allocated with new[] was released with scalar `delete`
 * (undefined behavior) — now `delete []`.  The flat-image guard is checked
 * before calling kurtosis() so we never divide by a ~zero std inside it;
 * the returned value is unchanged in both branches.
 */
double contrast(ImageMatrix *image)
{
    double *vec;
    int x, y;
    double avg, std, k;

    vec = new double[image->width*image->height];

    /* First pass: copy intensities out and accumulate the mean. */
    avg = 0;
    for (x = 0; x < image->width; x++)
        for (y = 0; y < image->height; y++) {
            vec[x*image->height+y] = image->pixel(x,y,0).intensity;
            avg += image->pixel(x,y,0).intensity;
        }
    avg = avg/(image->width*image->height);

    /* Second pass: population standard deviation. */
    std = 0;
    for (x = 0; x < image->width; x++)
        for (y = 0; y < image->height; y++)
            std += (image->pixel(x,y,0).intensity-avg)*(image->pixel(x,y,0).intensity-avg);
    std = sqrt(std/(image->width*image->height));

    /* Flat image: no contrast.  Bail out before kurtosis() would divide
       by an (almost) zero standard deviation. */
    if (std < 0.0000000001) {
        delete [] vec;
        return 0;
    }

    k = kurtosis(vec, avg, std, image->width*image->height);
    delete [] vec;   /* array form — vec came from new[] */

    return std / pow(k/pow(std,4), 0.25);
}
/* Fill z[0..3] with the mean, sample standard deviation (n-1 denominator),
   skewness and kurtosis of vec[0..vec_length-1].  With fewer than two
   samples — or a constant vector — the higher moments are reported as 0. */
void get4scalMoments(double *vec, int vec_length, double *z)
{
    int idx;
    double acc = 0.0;
    double mean, sdev;

    for (idx = 0; idx < vec_length; idx++)
        acc += vec[idx];
    mean = acc / (double)vec_length;

    if (vec_length <= 1) {
        sdev = 0;
    } else {
        acc = 0.0;
        for (idx = 0; idx < vec_length; idx++) {
            double dev = vec[idx] - mean;
            acc += dev * dev;
        }
        sdev = sqrt(acc / (double)(vec_length - 1));
    }

    z[0] = mean;
    z[1] = sdev;
    if (sdev == 0) {
        /* Degenerate spread: skew/kurtosis are undefined, report zeros. */
        z[2] = 0;
        z[3] = 0;
    } else {
        z[2] = skewness(vec, mean, sdev, vec_length);
        z[3] = kurtosis(vec, mean, sdev, vec_length);
    }
}
static void constraints() { typedef typename Distribution::value_type value_type; const Distribution& dist = DistributionConcept<Distribution>::get_object(); value_type x = 0; // The result values are ignored in all these checks. check_result<value_type>(cdf(dist, x)); check_result<value_type>(cdf(complement(dist, x))); check_result<value_type>(pdf(dist, x)); check_result<value_type>(quantile(dist, x)); check_result<value_type>(quantile(complement(dist, x))); check_result<value_type>(mean(dist)); check_result<value_type>(mode(dist)); check_result<value_type>(standard_deviation(dist)); check_result<value_type>(variance(dist)); check_result<value_type>(hazard(dist, x)); check_result<value_type>(chf(dist, x)); check_result<value_type>(coefficient_of_variation(dist)); check_result<value_type>(skewness(dist)); check_result<value_type>(kurtosis(dist)); check_result<value_type>(kurtosis_excess(dist)); check_result<value_type>(median(dist)); // // we can't actually test that at std::pair is returned from these // because that would mean including some std lib headers.... 
// range(dist); support(dist); check_result<value_type>(cdf(dist, f)); check_result<value_type>(cdf(complement(dist, f))); check_result<value_type>(pdf(dist, f)); check_result<value_type>(quantile(dist, f)); check_result<value_type>(quantile(complement(dist, f))); check_result<value_type>(hazard(dist, f)); check_result<value_type>(chf(dist, f)); check_result<value_type>(cdf(dist, d)); check_result<value_type>(cdf(complement(dist, d))); check_result<value_type>(pdf(dist, d)); check_result<value_type>(quantile(dist, d)); check_result<value_type>(quantile(complement(dist, d))); check_result<value_type>(hazard(dist, d)); check_result<value_type>(chf(dist, d)); check_result<value_type>(cdf(dist, l)); check_result<value_type>(cdf(complement(dist, l))); check_result<value_type>(pdf(dist, l)); check_result<value_type>(quantile(dist, l)); check_result<value_type>(quantile(complement(dist, l))); check_result<value_type>(hazard(dist, l)); check_result<value_type>(chf(dist, l)); check_result<value_type>(cdf(dist, i)); check_result<value_type>(cdf(complement(dist, i))); check_result<value_type>(pdf(dist, i)); check_result<value_type>(quantile(dist, i)); check_result<value_type>(quantile(complement(dist, i))); check_result<value_type>(hazard(dist, i)); check_result<value_type>(chf(dist, i)); unsigned long li = 1; check_result<value_type>(cdf(dist, li)); check_result<value_type>(cdf(complement(dist, li))); check_result<value_type>(pdf(dist, li)); check_result<value_type>(quantile(dist, li)); check_result<value_type>(quantile(complement(dist, li))); check_result<value_type>(hazard(dist, li)); check_result<value_type>(chf(dist, li)); }
/* Evaluate every requested statistic for every variable of a -stats command.
 *
 * p           - program-wide data (light curves, uncertainties, sizes).
 * lcindex     - index of the light curve being processed.
 * threadindex - per-thread slot for input/output arrays.
 * s           - the stats command: variables, statistic list, percentiles.
 *
 * Results are written to s->statsout[threadindex][] in (variable, statistic)
 * order.  An empty light curve yields all-zero outputs.
 *
 * Fixes: removed the unused local `tmpweight`; dropped the redundant NULL
 * check before free() (tmpdata is guaranteed non-NULL there, and free(NULL)
 * is a no-op anyway).
 */
void RunStatsCommand(ProgramData *p, int lcindex, int threadindex, _Stats *s)
{
    int i, j, k, Npct;
    double *tmpdata = NULL;

    /* Empty light curve: zero every output slot and return. */
    if(p->NJD[threadindex] <= 0) {
        for(i=0, k=0; i < s->Nvar; i++) {
            for(j=0; j < s->Nstats; j++, k++) {
                s->statsout[threadindex][k] = 0.0;
            }
        }
        return;
    }

    if((tmpdata = (double *) malloc(p->NJD[threadindex]*sizeof(double))) == NULL) {
        error(ERR_MEMALLOC);
    }

    for(i = 0, k=0; i < s->Nvar; i++) {
        /* Only light-curve vectors can be reduced to scalar statistics. */
        if(s->vars[i]->vectortype != VARTOOLS_VECTORTYPE_LC) {
            error(ERR_BADVARIABLETYPE_STATSCOMMAND);
        }

        /* Materialize the variable's values for this light curve. */
        for(j=0; j < p->NJD[threadindex]; j++) {
            tmpdata[j] = EvaluateVariable_Double(lcindex, threadindex, j, s->vars[i]);
        }

        /* Percentile values are consumed in order, restarting per variable. */
        Npct = 0;
        for(j = 0; j < s->Nstats; j++, k++) {
            switch(s->statstocalc[j]) {
            case VARTOOLS_STATSTYPE_MEAN:
                s->statsout[threadindex][k] = getmean(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_WEIGHTEDMEAN:
                s->statsout[threadindex][k] = getweightedmean(p->NJD[threadindex], tmpdata, p->sig[threadindex]);
                break;
            case VARTOOLS_STATSTYPE_MEDIAN:
                s->statsout[threadindex][k] = median(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_MEDIAN_WEIGHT:
                s->statsout[threadindex][k] = median_weight(p->NJD[threadindex], tmpdata, p->sig[threadindex]);
                break;
            case VARTOOLS_STATSTYPE_STDDEV:
                s->statsout[threadindex][k] = stddev(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_MEDDEV:
                s->statsout[threadindex][k] = meddev(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_MEDMEDDEV:
                s->statsout[threadindex][k] = medmeddev(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_MAD:
                s->statsout[threadindex][k] = MAD(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_KURTOSIS:
                s->statsout[threadindex][k] = kurtosis(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_SKEWNESS:
                s->statsout[threadindex][k] = skewness(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_PERCENTILE:
                s->statsout[threadindex][k] = percentile(p->NJD[threadindex], tmpdata, s->pctval[Npct]);
                Npct++;
                break;
            case VARTOOLS_STATSTYPE_PERCENTILE_WEIGHT:
                s->statsout[threadindex][k] = percentile_weight(p->NJD[threadindex], tmpdata, p->sig[threadindex], s->pctval[Npct]);
                Npct++;
                break;
            case VARTOOLS_STATSTYPE_MAXIMUM:
                s->statsout[threadindex][k] = getmaximum(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_MINIMUM:
                s->statsout[threadindex][k] = getminimum(p->NJD[threadindex], tmpdata);
                break;
            case VARTOOLS_STATSTYPE_SUM:
                s->statsout[threadindex][k] = getsum(p->NJD[threadindex], tmpdata);
                break;
            default:
                error(ERR_CODEERROR);
            }
        }
    }

    free(tmpdata);
}