Example #1
Vector_double
stf::filter( const Vector_double& data, std::size_t filter_start,
        std::size_t filter_end, const Vector_double &a, int SR,
        stf::Func func, bool inverse ) {
    if (data.size()<=0 || filter_start>=data.size() || filter_end >= data.size()) {
        std::out_of_range e("subscript out of range in stf::filter()");
        throw e;
    }
    std::size_t filter_size=filter_end-filter_start+1;
    Vector_double data_return(filter_size);
    double SI=1.0/SR; //the sampling interval

    double *in;
    //fftw_complex is a double[2]; hence, out is an array of
    //double[2] with out[n][0] being the real and out[n][1] being
    //the imaginary part.
    fftw_complex *out;
    fftw_plan p1, p2;

    //memory allocation as suggested by fftw:
    in =(double *)fftw_malloc(sizeof(double) * filter_size);
    out=(fftw_complex *)fftw_malloc(sizeof(fftw_complex) * ((int)(filter_size/2)+1));

    // calculate the offset (a straight line between the first and last points):
    double offset_0=data[filter_start];
    double offset_1=data[filter_end]-offset_0;
    double offset_step=offset_1 / (filter_size-1);

    //fill the input array with data removing the offset:
    for (std::size_t n_point=0;n_point<filter_size;++n_point) {
        in[n_point]=data[n_point+filter_start]-(offset_0 + offset_step*n_point);
    }

    //plan the fft and execute it:
    p1 =fftw_plan_dft_r2c_1d((int)filter_size,in,out,FFTW_ESTIMATE);
    fftw_execute(p1);

    for (std::size_t n_point=0; n_point < (unsigned int)(filter_size/2)+1; ++n_point) {
        //calculate the frequency (in kHz) which corresponds to the index:
        double f=n_point / (filter_size*SI);
        double rslt= (!inverse? func(f,a) : 1.0-func(f,a));
        out[n_point][0] *= rslt;
        out[n_point][1] *= rslt;
    }

    //do the reverse fft:
    p2=fftw_plan_dft_c2r_1d((int)filter_size,out,in,FFTW_ESTIMATE);
    fftw_execute(p2);

    //fill the return array, adding the offset, and scaling by filter_size
    //(because fftw computes an unnormalized transform):
    data_return.resize(filter_size);
    for (std::size_t n_point=0; n_point < filter_size; ++n_point) {
        data_return[n_point]=(in[n_point]/filter_size + offset_0 + offset_step*n_point);
    }
    fftw_destroy_plan(p1);
    fftw_destroy_plan(p2);
    fftw_free(in);fftw_free(out);
    return data_return;
}
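The filter kernel is supplied as func and is evaluated once per frequency bin as func(f, a), with f in the same frequency units as SR and a holding the kernel parameters (typically the corner frequency). Below is a minimal sketch of a compatible kernel and call; it assumes that Vector_double is std::vector<double> and that stf::Func accepts a plain function with this signature, and the kernel itself is illustrative only (the library has its own, e.g. fgaussColqu used in Example #8).

#include <cmath>
#include <vector>

typedef std::vector<double> Vector_double; // assumption: Vector_double is std::vector<double>

// Hypothetical kernel matching the func(f, a) call pattern above: a Gaussian
// low-pass whose amplitude falls to ~0.71 (-3 dB) at the corner frequency a[0].
double gauss_lowpass(double f, const Vector_double& a) {
    const double fc = a[0];
    return std::exp(-0.3466 * (f / fc) * (f / fc));
}

// Intended call (requires the stf headers; shown for illustration only):
// Vector_double filtered = stf::filter(trace, 0, trace.size() - 1,
//                                      Vector_double(1, corner_freq),
//                                      sampling_rate, gauss_lowpass, false);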
Example #2
Vector_double stfnum::get_scale(Vector_double& data, double oldx) {
    Vector_double xyscale(4);

    if (data.size() == 0) {
        xyscale[0] = 1.0/oldx;
        xyscale[1] = 0.0;
        xyscale[2] = 1.0;
        xyscale[3] = 0.0;

        return xyscale;
    }

    double ymin,ymax,amp,off;

    ymin = *data.begin();
    ymax = ymin;
    for (Vector_double::iterator it = data.begin(); it != data.end(); ++it) {
        double v = *it;
        if (v < ymin) ymin = v;
        else if (ymax < v) ymax = v;
    }
    amp = ymax - ymin;
    off = ymin / amp;

    data = stfio::vec_scal_mul(data, 1.0 / amp);
    data = stfio::vec_scal_minus(data, off);

    xyscale[0] = 1.0/(data.size()*oldx);
    xyscale[1] = 0;
    xyscale[2] = 1.0/amp;
    xyscale[3] = off;
    
    return xyscale;
}
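A quick worked example, assuming stfio::vec_scal_mul and stfio::vec_scal_minus are element-wise scalar multiplication and subtraction (as their names and the companion vec_scal_div in Example #14 suggest): for data = {2, 4, 6} and oldx = 1, ymin = 2 and ymax = 6, so amp = 4 and off = 0.5; data is rescaled in place to {0, 0.5, 1}, and the returned xyscale is {1/3, 0, 0.25, 0.5}, i.e. {1/(N*oldx), 0, 1/amp, ymin/amp}.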
Example #3
std::vector<int>
stf::peakIndices(const Vector_double& data, double threshold,
                 int minDistance)
{
    // to avoid unnecessary copying, we first reserve quite
    // a bit of space for the vector:
    std::vector<int> peakInd;
    peakInd.reserve(data.size());
    for (unsigned n_data=0; n_data<data.size(); ++n_data) {
        // check whether the data point is above threshold...
        int llp=n_data;
        int ulp=n_data+1;
        if (data[n_data]>threshold) {
            // ... and if so, find the data point where the threshold
            // is crossed again in the opposite direction, ...
            for (;;) {
                if (n_data>=data.size()-1) {
                    ulp=(int)data.size()-1;
                    break;
                }
                n_data++;
                if (data[n_data]<threshold && (int)n_data-ulp>minDistance) {
                    // ... making this the upper limit of the peak window:
                    ulp=n_data;
                    break;
                }
            }
            // Now, find the peak within the window:
            double max=-1e8;
            int peakIndex=llp;
            for (int n_p=llp; n_p<=ulp; ++n_p) {
                if (data[n_p]>max) {
                    max=data[n_p];
                    peakIndex=n_p;
                }
            }
            peakInd.push_back(peakIndex);
        }
    }
    // Trim peakInd's reserved memory:
    std::vector<int>(peakInd.begin(),peakInd.end()).swap(peakInd);
    return peakInd;
}
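Worked example: peakIndices({0, 1, 3, 2, 0, 5, 0, 0}, 2.5, 0) first crosses the threshold at index 2, extends the window until the trace falls back below threshold (indices 2 to 4) and records the maximum at index 2; the second crossing at index 5 gives a window over indices 5 to 7 with its maximum at index 5, so the function returns {2, 5}.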
Example #4
std::map<double, int>
stf::histogram(const Vector_double& data, int nbins) {

    if (nbins==-1) {
        nbins = int(data.size()/100.0);
    }

    double fmax = *std::max_element(data.begin(), data.end());
    double fmin = *std::min_element(data.begin(), data.end());
    fmax += (fmax-fmin)*1e-9;

    double bin = (fmax-fmin)/nbins;

    std::map<double,int> histo;
    for (int nbin=0; fmin + nbin*bin < fmax; ++nbin) {
        histo[fmin + nbin*bin] = 0;
    }
    for (std::size_t npoint=0; npoint < data.size(); ++npoint) {
        int nbin = int((data[npoint]-fmin) / bin);
        histo[fmin + nbin*bin]++;
    }
    return histo;
}
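Worked example: histogram({0, 1, 2, 3}, 2) gives fmin = 0 and fmax = 3 plus a tiny offset, so the bin width is about 1.5 and the keys are 0 and about 1.5; values 0 and 1 land in the first bin and 2 and 3 in the second, yielding {0: 2, 1.5: 2}. Nudging fmax upward by (fmax-fmin)*1e-9 ensures the largest data value maps into the top bin rather than creating an extra key one bin past it.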
Example #5
stf::Table stf::defaultOutput(
    const Vector_double& pars,
    const std::vector<stf::parInfo>& parsInfo,
    double chisqr
) {
    if (pars.size()!=parsInfo.size()) {
        throw std::out_of_range("index out of range in stf::defaultOutput");
    }
    stf::Table output(pars.size()+1,1);
    try {
        output.SetColLabel(0,"Best-fit value");
        for (std::size_t n_p=0;n_p<pars.size(); ++n_p) {
            output.SetRowLabel(n_p,parsInfo[n_p].desc);
            output.at(n_p,0)=pars[n_p];
        }
        output.SetRowLabel(pars.size(),"SSE");
        output.at(pars.size(),0)=chisqr;
    }
    catch (...) {
        throw;
    }
    return output;
}
Example #6
double stf::integrate_trapezium(
        const Vector_double& input,
        std::size_t i1,
        std::size_t i2,
        double x_scale
) {
    if (i2>=input.size() || i1>=i2) {
        throw std::out_of_range( "integration interval out of range in stf::integrate_trapezium" );
    }
    double a = i1 * x_scale;
    double b = i2 * x_scale;

    double sum=input[i1]+input[i2];
    for (std::size_t n=i1+1; n<i2; ++n) {
        sum += 2*input[n];
    }
    sum *= (b-a)/2/(i2-i1);
    return sum;
}
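The loop implements the composite trapezium rule with step h = x_scale: the weighted sum input[i1] + 2*input[i1+1] + ... + 2*input[i2-1] + input[i2] is multiplied by (b-a)/(2*(i2-i1)) = h/2. For example, with input = {0, 1, 2}, i1 = 0, i2 = 2 and x_scale = 1, the sum is 0 + 2*1 + 2 = 4 and the result is 4 * 0.5 = 2, the exact integral of f(x) = x over [0, 2].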
Example #7
double stf::integrate_simpson(
        const Vector_double& input,
        std::size_t i1,
        std::size_t i2,
        double x_scale
) {

    // Use composite Simpson's rule to approximate the definite integral of f from a to b
    // check for out-of-range:
    if (i2>=input.size() || i1>=i2) {
        throw std::out_of_range( "integration interval out of range in stf::integrate_simpson" );
    }
    bool even = std::div((int)i2-(int)i1,2).rem==0;

    // use Simpson's rule for the even part:
    if (!even)
        i2--;
    std::size_t n=i2-i1;
    double a=i1*x_scale;
    double b=i2*x_scale;

    double sum_2=0.0, sum_4=0.0;
    for (std::size_t j = 1; j <= n/2; ++j) {
        if (j<n/2)
            sum_2+=input[i1+2*j];
        sum_4+=input[i1+2*j-1];
    }
    double sum=input[i1] + 2*sum_2 + 4*sum_4 + input[i2];
    sum *= (b-a)/(double)n;
    sum /= 3;

    // if uneven, add the last interval by trapezoidal integration:
    if (!even) {
        i2++;
        a = (i2-1)*x_scale;
        b = i2*x_scale;
        sum += (b-a)/2 * (input[i2]+input[i2-1]);
    }
    return sum;
}
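For an even number of intervals n = i2-i1 this is the standard composite Simpson's rule, (h/3) * (input[i1] + 4*sum(odd interior points) + 2*sum(even interior points) + input[i2]) with h = x_scale, since (b-a)/n = h. Worked example: with input = {0, 1, 4, 9, 16} (samples of x^2), i1 = 0, i2 = 4 and x_scale = 1, sum = 0 + 2*4 + 4*(1+9) + 16 = 64 and the result is 64/3, the exact integral of x^2 over [0, 4]. If n is odd, the last interval is handled separately by a single trapezium.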
Example #8
Vector_double
stf::deconvolve(const Vector_double& data, const Vector_double& templ,
                int SR, double hipass, double lopass, stfio::ProgressInfo& progDlg)
{
    bool skipped = false;
    progDlg.Update( 0, "Starting deconvolution...", &skipped );
    if (data.size()<=0 || templ.size() <=0 || templ.size() > data.size()) {
        std::out_of_range e("subscript out of range in stf::deconvolve()");
        throw e;
    }
    /* pad templ */
    Vector_double templ_padded(data.size());
    std::copy(templ.begin(), templ.end(), templ_padded.begin());
    if (templ.size() < templ_padded.size()) {
        std::fill(templ_padded.begin()+templ.size(), templ_padded.end(), 0);
    }

    Vector_double data_return(data.size());
    if (skipped) {
        data_return.resize(0);
        return data_return;
    }

    double *in_data, *in_templ_padded;
    //fftw_complex is a double[2]; hence, out is an array of
    //double[2] with out[n][0] being the real and out[n][1] being
    //the imaginary part.
    fftw_complex *out_data, *out_templ_padded;
    fftw_plan p_data, p_templ, p_inv;

    //memory allocation as suggested by fftw:
    in_data =(double *)fftw_malloc(sizeof(double) * data.size());
    out_data = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * ((int)(data.size()/2)+1));
    in_templ_padded =(double *)fftw_malloc(sizeof(double) * templ_padded.size());
    out_templ_padded = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * ((int)(templ_padded.size()/2)+1));

    std::copy(data.begin(), data.end(), &in_data[0]);
    std::copy(templ_padded.begin(), templ_padded.end(), &in_templ_padded[0]);

    //plan the ffts and execute them:
    p_data =fftw_plan_dft_r2c_1d((int)data.size(), in_data, out_data,
                                 FFTW_ESTIMATE);
    fftw_execute(p_data);
    p_templ =fftw_plan_dft_r2c_1d((int)templ_padded.size(),
                                  in_templ_padded, out_templ_padded, FFTW_ESTIMATE);
    fftw_execute(p_templ);

    double SI=1.0/SR; //the sampling interval
    progDlg.Update( 25, "Performing deconvolution...", &skipped );
    if (skipped) {
        data_return.resize(0);
        return data_return;
    }
    Vector_double f_c(1);
    for (std::size_t n_point=0; n_point < (unsigned int)(data.size()/2)+1; ++n_point) {
        /* highpass filter */
        double f = n_point / (data.size()*SI);

        double rslt_hi = 1.0;
        if (hipass > 0) {
            f_c[0] = hipass;
            rslt_hi = 1.0-fgaussColqu(f, f_c);
        }

        /* lowpass filter */
        double rslt_lo = 1.0;
        if (lopass > 0) {
            f_c[0] = lopass;
            rslt_lo= fgaussColqu(f, f_c);
        }

        /* do the division in place */
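        // Complex division: (a+ib)/(c+id) = ((a*c + b*d) + i*(b*c - a*d)) / (c*c + d*d);
        // the real and imaginary parts assigned below follow this formula,
        // weighted by the high-pass and low-pass factors computed above.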
        double a = out_data[n_point][0];
        double b = out_data[n_point][1];
        double c = out_templ_padded[n_point][0];
        double d = out_templ_padded[n_point][1];
        double mag2 = c*c + d*d;
        out_data[n_point][0] = rslt_hi * rslt_lo * (a*c + b*d)/mag2;
        out_data[n_point][1] = rslt_hi * rslt_lo * (b*c - a*d)/mag2;
    }

    //do the reverse fft:
    p_inv = fftw_plan_dft_c2r_1d((int)data.size(),out_data, in_data, FFTW_ESTIMATE);
    fftw_execute(p_inv);

    //fill the return array, scaling by data.size()
    //(because fftw computes an unnormalized transform):
    for (std::size_t n_point=0; n_point < data.size(); ++n_point) {
        data_return[n_point]= in_data[n_point]/data.size();
    }

    fftw_destroy_plan(p_data);
    fftw_destroy_plan(p_templ);
    fftw_destroy_plan(p_inv);

    fftw_free(in_data);
    fftw_free(out_data);
    fftw_free(in_templ_padded);
    fftw_free(out_templ_padded);

    progDlg.Update( 50, "Computing data histogram...", &skipped );
    if (skipped) {
        data_return.resize(0);
        return data_return;
    }
    int nbins =  int(data_return.size()/500.0);
    std::map<double, int> histo = histogram(data_return, nbins);
    double max_value = -1;
    double max_time = 0;
    double maxhalf_time = 0;
    Vector_double histo_fit(0);
    for (std::map<double,int>::const_iterator it=histo.begin();
         it != histo.end(); ++it) {
        if (it->second > max_value) {
            max_value = it->second;
            max_time = it->first;
        }
        histo_fit.push_back(it->second);
#ifdef _STFDEBUG
        std::cout << it->first << "\t" << it->second << std::endl;
#endif
    }
    for (std::map<double,int>::const_iterator it=histo.begin();
         it != histo.end(); ++it) {
        if (it->second > 0.5*max_value) {
            maxhalf_time = it->first;
            break;
        }
    }
    maxhalf_time = fabs(max_time-maxhalf_time);
    progDlg.Update( 75, "Fitting Gaussian...", &skipped );
    if (skipped) {
        data_return.resize(0);
        return data_return;
    }
    /* Fit Gaussian to histogram */
    Vector_double opts = LM_default_opts();

    std::string info;
    int warning;
    std::vector< stf::storedFunc > funcLib = stf::GetFuncLib();
    
    double interval = (++histo.begin())->first-histo.begin()->first;
    /* Initial parameter guesses */
    Vector_double pars(3);
    pars[0] = max_value;
    pars[1] = (max_time - histo.begin()->first);
    pars[2] = maxhalf_time *sqrt(2.0)/2.35482;
#ifdef _STFDEBUG    
    std::cout << "nbins: " << nbins << std::endl;
    std::cout << "initial values:" << std::endl;
    for (std::size_t np=0; np<pars.size(); ++np) {
        std::cout << pars[np] << std::endl;
    }
#endif

#ifdef _STFDEBUG
    double chisqr =
#endif
        lmFit(histo_fit, interval, funcLib[funcLib.size()-1], opts, true,
              pars, info, warning );
#ifdef _STFDEBUG
    std::cout << chisqr << "\t" << interval << std::endl;
    std::cout << "final values:" << std::endl;
    for (std::size_t np=0; np<pars.size(); ++np) {
        std::cout << pars[np] << std::endl;
    }
#endif
    double sigma = pars[2]/sqrt(2.0);
    /* return data in terms of sigma */
    for (std::size_t n_point=0; n_point < data.size(); ++n_point) {
        data_return[n_point] /= sigma;
    }
    progDlg.Update( 100, "Done.", &skipped );
    return data_return;
}
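The tail of this function (from the histogram on) expresses the deconvolved trace in units of its own noise: an all-point histogram of the result is built, a Gaussian is fitted to it with lmFit (the code uses the last entry of stf::GetFuncLib() and three parameters that read as amplitude, position and width), and every sample is divided by sigma = pars[2]/sqrt(2). The sqrt(2) suggests the width parameter is defined as in exp(-(x-mu)^2/w^2); that reading is inferred from this snippet rather than from documented API.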
Example #9
int
stf::linsolv( int m, int n, int nrhs, Vector_double& A,
              Vector_double& B)
{
#ifndef TEST_MINIMAL
    if (A.size()<=0) {
        throw std::runtime_error("Matrix A has size 0 in stf::linsolv");
    }

    if (B.size()<=0) {
        throw std::runtime_error("Matrix B has size 0 in stf::linsolv");
    }

    if (A.size()!= std::size_t(m*n)) {
        throw std::runtime_error("Size of matrix A is not m*n");
    }

    /* Arguments to dgetrf_
     *  ====================
     *
     *  M       (input) INTEGER
     *          The number of rows of the matrix A.  M >= 0.
     *
     *  N       (input) INTEGER
     *          The number of columns of the matrix A.  N >= 0.
     *
     *  A       (input/output) DOUBLE PRECISION array, dimension (LDA,N)
     *          On entry, the M-by-N matrix to be factored.
     *          On exit, the factors L and U from the factorization
     *          A = P*L*U; the unit diagonal elements of L are not stored.
     *
     *  LDA     (input) INTEGER
     *          The leading dimension of the array A.  LDA >= max(1,M).
     *
     *  IPIV    (output) INTEGER array, dimension (min(M,N))
     *          The pivot indices; for 1 <= i <= min(M,N), row i of the
     *          matrix was interchanged with row IPIV(i).
     *
     *  INFO    (output) INTEGER
     *          = 0:  successful exit
     *          < 0:  if INFO = -i, the i-th argument had an illegal value
     *          > 0:  if INFO = i, U(i,i) is exactly zero. The factorization
     *                has been completed, but the factor U is exactly
     *                singular, and division by zero will occur if it is used
     *                to solve a system of equations.
     */

    int lda_f = m;
    std::size_t ipiv_size = (m < n) ? m : n;
    std::vector<int> ipiv(ipiv_size);
    int info=0;

    dgetrf_(&m, &n, &A[0], &lda_f, &ipiv[0], &info);
    if (info<0) {
        std::ostringstream error_msg;
        error_msg << "Argument " << -info << " had an illegal value in LAPACK's dgetrf_";
        throw std::runtime_error(error_msg.str());
    }
    if (info>0) {
        throw std::runtime_error("Singular matrix in LAPACK's dgetrf_; would result in division by zero");
    }


    /* Arguments to dgetrs_
     *  ====================
     *
     *  TRANS   (input) CHARACTER*1
     *          Specifies the form of the system of equations:
     *          = 'N':  A * X = B  (No transpose)
     *          = 'T':  A'* X = B  (Transpose)
     *          = 'C':  A'* X = B  (Conjugate transpose = Transpose)
     *
     *  N       (input) INTEGER
     *          The order of the matrix A.  N >= 0.
     *
     *  NRHS    (input) INTEGER
     *          The number of right hand sides, i.e., the number of columns
     *          of the matrix B.  NRHS >= 0.
     *
     *  A       (input) DOUBLE PRECISION array, dimension (LDA,N)
     *          The factors L and U from the factorization A = P*L*U
     *          as computed by DGETRF.
     *
     *  LDA     (input) INTEGER
     *          The leading dimension of the array A.  LDA >= max(1,N).
     *
     *  IPIV    (input) INTEGER array, dimension (N)
     *          The pivot indices from DGETRF; for 1<=i<=N, row i of the
     *          matrix was interchanged with row IPIV(i).
     *
     *  B       (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
     *          On entry, the right hand side matrix B.
     *          On exit, the solution matrix X.
     *
     *  LDB     (input) INTEGER
     *          The leading dimension of the array B.  LDB >= max(1,N).
     *
     *  INFO    (output) INTEGER
     *          = 0:  successful exit
     *          < 0:  if INFO = -i, the i-th argument had an illegal value
     */
    char trans='N';
    dgetrs_(&trans, &m, &nrhs, &A[0], &m, &ipiv[0], &B[0], &m, &info);
    if (info<0) {
        std::ostringstream error_msg;
        error_msg << "Argument " << -info << " had an illegal value in LAPACK's dgetrs_";
        throw std::runtime_error(error_msg.str());
    }
#endif
    return 0;
}
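Worked example: LAPACK expects column-major storage, so the simplest check is a symmetric 2x2 system, whose row- and column-major layouts coincide. With Vector_double A = {2, 1, 1, 3} and B = {3, 5}, calling stf::linsolv(2, 2, 1, A, B) factorizes A with dgetrf_ and solves 2x + y = 3, x + 3y = 5 with dgetrs_, overwriting B with the solution {0.8, 1.4} and A with its LU factors.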
Example #10
Vector_double
stf::linCorr(const Vector_double& data, const Vector_double& templ, stfio::ProgressInfo& progDlg)
{
    bool skipped = false;
    // the template has to be smaller than the data waveform:
    if (data.size()<templ.size()) {
        throw std::runtime_error("Template larger than data in stf::linCorr");
    }
    if (data.size()==0 || templ.size()==0) {
        throw std::runtime_error("Array of size 0 in stf::linCorr");
    }
    Vector_double Corr(data.size()-templ.size());

    // Optimal scaling & offset:
    // avoid redundant computations:
    double sum_templ_data=0.0, sum_templ=0.0, sum_templ_sqr=0.0, sum_data=0.0, sum_data_sqr=0.0;
    for (int n_templ=0; n_templ<(int)templ.size();++n_templ) {
        sum_templ_data+=templ[n_templ]*data[0+n_templ];
        sum_data+=data[0+n_templ];
        sum_data_sqr+=data[0+n_templ]*data[0+n_templ];
        sum_templ+=templ[n_templ];
        sum_templ_sqr+=templ[n_templ]*templ[n_templ];
    }
    double y_old=0.0;
    double y2_old=0.0;
    int progCounter=0;
    double progFraction=(data.size()-templ.size())/100.0;
    for (unsigned n_data=0; n_data<data.size()-templ.size(); ++n_data) {
        if (n_data/progFraction>progCounter) {
            progDlg.Update( (int)((double)n_data/(double)(data.size()-templ.size())*100.0),
                            "Calculating correlation coefficient", &skipped );
            if (skipped) {
                Corr.resize(0);
                return Corr;
            }
            progCounter++;
        }
        if (n_data!=0) {
            sum_templ_data=0.0;
            // The product has to be computed in full length:
            for (int n_templ=0; n_templ<(int)templ.size();++n_templ) {
                sum_templ_data+=templ[n_templ]*data[n_data+n_templ];
            }
            // The new value that will be added is:
            double y_new=data[n_data+templ.size()-1];
            double y2_new=data[n_data+templ.size()-1]*data[n_data+templ.size()-1];
            sum_data+=y_new-y_old;
            sum_data_sqr+=y2_new-y2_old;
        }
        // The first value that was added (and will have to be subtracted during
        // the next loop):
        y_old=data[n_data+0];
        y2_old=data[n_data+0]*data[n_data+0];

        double scale=(sum_templ_data-sum_templ*sum_data/templ.size())/
        (sum_templ_sqr-sum_templ*sum_templ/templ.size());
        double offset=(sum_data-scale*sum_templ)/templ.size();

        // Now that the optimal template has been found,
        // compute the correlation between data and optimal template.
        // The correlation coefficient is computed in a way that avoids
        // numerical instability; therefore, the sum of squares
        // computed above can't be re-used.
        // Get the means:
        double mean_data=sum_data/templ.size();
        double sum_optTempl=sum_templ*scale+offset*templ.size();
        double mean_optTempl=sum_optTempl/templ.size();

        // Get SDs:
        double sd_data=0.0;
        double sd_templ=0.0;
        for (int i=0;i<(int)templ.size();++i) {
            sd_data+=SQR(data[i+n_data]-mean_data);
            sd_templ+=SQR(templ[i]*scale+offset-mean_optTempl);
        }
        sd_data=sqrt(sd_data/templ.size());
        sd_templ=sqrt(sd_templ/templ.size());

        // Get correlation:
        double r=0.0;
        for (int i=0;i<(int)templ.size();++i) {
            r+=(data[i+n_data]-mean_data)*(templ[i]*scale+offset-mean_optTempl);
        }
        r/=((templ.size()-1)*sd_data*sd_templ);
        Corr[n_data]=r;
    }
    return Corr;
}
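At every window position, scale and offset are the closed-form least-squares estimates for fitting scale*templ + offset to the current data window: scale = (sum_templ_data - sum_templ*sum_data/N) / (sum_templ_sqr - sum_templ*sum_templ/N) and offset = (sum_data - scale*sum_templ)/N, with N = templ.size(); the returned value is the correlation between the window and that optimally scaled template. Updating sum_data and sum_data_sqr by adding the newest sample and dropping the oldest keeps the per-window cost dominated by the one product sum that genuinely has to be recomputed.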
Example #11
Vector_double
stf::detectionCriterion(const Vector_double& data, const Vector_double& templ, stfio::ProgressInfo& progDlg)
{
    bool skipped=false;
    // variable names are taken from Clements & Bekkers (1997) as long
    // as they don't interfere with C++ keywords (such as "template")
    Vector_double detection_criterion(data.size()-templ.size());
    // avoid redundant computations:
    double sum_templ_data=0.0, sum_templ=0.0, sum_templ_sqr=0.0, sum_data=0.0, sum_data_sqr=0.0;
    for (int n_templ=0; n_templ<(int)templ.size();++n_templ) {
        sum_templ_data+=templ[n_templ]*data[0+n_templ];
        sum_data+=data[0+n_templ];
        sum_data_sqr+=data[0+n_templ]*data[0+n_templ];
        sum_templ+=templ[n_templ];
        sum_templ_sqr+=templ[n_templ]*templ[n_templ];
    }
    double y_old=0.0;
    double y2_old=0.0;
    int progCounter=0;
    double progFraction=(data.size()-templ.size())/100.0;
    for (unsigned n_data=0; n_data<data.size()-templ.size(); ++n_data) {
        if (n_data/progFraction>progCounter) {
            progDlg.Update( (int)((double)n_data/(double)(data.size()-templ.size())*100.0),
                            "Calculating detection criterion", &skipped );
            if (skipped) {
                detection_criterion.resize(0);
                return detection_criterion;
            }
            progCounter++;
        }
        if (n_data!=0) {
            sum_templ_data=0.0;
            // The product has to be computed in full length:
            for (int n_templ=0; n_templ<(int)templ.size();++n_templ) {
                sum_templ_data+=templ[n_templ]*data[n_data+n_templ];
            }
            // The new value that will be added is:
            double y_new=data[n_data+templ.size()-1];
            double y2_new=data[n_data+templ.size()-1]*data[n_data+templ.size()-1];
            sum_data+=y_new-y_old;
            sum_data_sqr+=y2_new-y2_old;
        }
        // The first value that was added (and will have to be subtracted during
        // the next loop):
        y_old=data[n_data+0];
        y2_old=data[n_data+0]*data[n_data+0];

        double scale=(sum_templ_data-sum_templ*sum_data/templ.size())/
            (sum_templ_sqr-sum_templ*sum_templ/templ.size());
        double offset=(sum_data-scale*sum_templ)/templ.size();
        double sse=sum_data_sqr+scale*scale*sum_templ_sqr+templ.size()*offset*offset -
            2.0*(scale*sum_templ_data +
                 offset*sum_data-scale*offset*sum_templ);
        double standard_error=sqrt(sse/(templ.size()-1));
        detection_criterion[n_data]=(scale/standard_error);
    }
    return detection_criterion;
}
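This is the sliding-template detection of Clements & Bekkers (1997) referenced in the comment above: for every window the template is optimally scaled and offset by least squares, the residual sum of squares is obtained in closed form as SSE = sum_data_sqr + scale^2*sum_templ_sqr + N*offset^2 - 2*(scale*sum_templ_data + offset*sum_data - scale*offset*sum_templ), and the detection criterion is scale / sqrt(SSE/(N-1)). Peaks of this trace above a user-chosen threshold mark candidate events and can be located, for example, with stf::peakIndices from Example #3.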
Example #12
double stfnum::lmFit( const Vector_double& data, double dt,
                   const stfnum::storedFunc& fitFunc, const Vector_double& opts,
                   bool use_scaling,
                   Vector_double& p, std::string& info, int& warning )
{
    // Basic range checking:
    if (fitFunc.pInfo.size()!=p.size()) {
        std::string msg("Error in stfnum::lmFit()\n"
                "function parameters (p_fit) and parameters entered (p) have different sizes");
        throw std::runtime_error(msg);
    }
    if ( opts.size() != 6 ) {
        std::string msg("Error in stfnum::lmFit()\n"
                "wrong number of options");
        throw std::runtime_error(msg);
    }

    bool constrained = false;
    std::vector< double > constrains_lm_lb( fitFunc.pInfo.size() );
    std::vector< double > constrains_lm_ub( fitFunc.pInfo.size() );

    bool can_scale = use_scaling;
    
    for ( unsigned n_p=0; n_p < fitFunc.pInfo.size(); ++n_p ) {
        if ( fitFunc.pInfo[n_p].constrained ) {
            constrained = true;
            constrains_lm_lb[n_p] = fitFunc.pInfo[n_p].constr_lb;
            constrains_lm_ub[n_p] = fitFunc.pInfo[n_p].constr_ub;
        } else {
            constrains_lm_lb[n_p] = -DBL_MAX;
            constrains_lm_ub[n_p] = DBL_MAX;
        }
        if ( can_scale ) {
            if (fitFunc.pInfo[n_p].scale == stfnum::noscale) {
                can_scale = false;
            }
        }
    }

    // Store the functions at global scope:
    saveFunc(fitFunc.func);
    saveJac(fitFunc.jac);

    double info_id[LM_INFO_SZ];
    Vector_double data_ptr(data);
    Vector_double xyscale(4);
    if (can_scale) {
        xyscale = get_scale(data_ptr, dt);
    }
    
    // The parameters need to be separated into two parts:
    // Those that are to be fitted and those that the client wants
    // to keep constant. Since there is no native support to
    // do so in Lourakis' routines, the workaround is a little
    // tricky, making (ab)use of the void* pointer:

    // number of parameters that need to be fitted:
    int n_fitted=0;
    for ( unsigned n_p=0; n_p < fitFunc.pInfo.size(); ++n_p ) {
        n_fitted += fitFunc.pInfo[n_p].toFit;
    }
    // parameters that need to be fitted:
    Vector_double p_toFit(n_fitted);
    std::deque<bool> p_fit_bool( fitFunc.pInfo.size() );
    // parameters that are held constant:
    Vector_double p_const( fitFunc.pInfo.size()-n_fitted );
    for ( unsigned n_p=0, n_c=0, n_f=0; n_p < fitFunc.pInfo.size(); ++n_p ) {
        if (fitFunc.pInfo[n_p].toFit) {
            p_toFit[n_f++] = p[n_p];
            if (can_scale) {
                p_toFit[n_f-1] = fitFunc.pInfo[n_p].scale(p_toFit[n_f-1], xyscale[0],
                                                          xyscale[1], xyscale[2], xyscale[3]);
            }
        } else {
            p_const[n_c++] = p[n_p];
            if (can_scale) {
                p_const[n_c-1] = fitFunc.pInfo[n_p].scale(p_const[n_c-1], xyscale[0],
                                                          xyscale[1], xyscale[2], xyscale[3]);
            }
        }
        p_fit_bool[n_p] = fitFunc.pInfo[n_p].toFit;
    }
    // size * dt_new = 1 -> dt_new = 1.0/size
    double dt_finfo = dt;
    if (can_scale)
        dt_finfo = 1.0/data_ptr.size();

    fitInfo fInfo( p_fit_bool, p_const, dt_finfo );

    // make l-value of opts:
    Vector_double opts_l(5);
    for (std::size_t n=0; n < 4; ++n) opts_l[n] = opts[n];
    opts_l[4] = -1e-6;
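    // levmar reads the fifth option as the finite-difference step (delta) when no
    // analytic Jacobian is provided; per levmar's documentation, a negative value
    // requests central differences instead of forward differences.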
    int it = 0;
    if (p_toFit.size()!=0 && data_ptr.size()!=0) {
        double old_info_id[LM_INFO_SZ];

        // initialize with initial parameter guess:
        Vector_double old_p_toFit(p_toFit);

#ifdef _DEBUG
        std::ostringstream optsMsg;
        optsMsg << "\nopts: ";
        for (std::size_t n_p=0; n_p < opts.size(); ++n_p)
            optsMsg << opts[n_p] << "\t";
        optsMsg << "\n" << "data_ptr[" << data_ptr.size()-1 << "]=" << data_ptr[data_ptr.size()-1] << "\n";
        optsMsg << "constrains_lm_lb: ";
        for (std::size_t n_p=0; n_p < constrains_lm_lb.size(); ++n_p) 
            optsMsg << constrains_lm_lb[n_p] << "\t";
        optsMsg << "\n" << "constrains_lm_ub: ";
        for (std::size_t n_p=0; n_p < constrains_lm_ub.size(); ++n_p) 
            optsMsg << constrains_lm_ub[n_p] << "\t";
        optsMsg << "\n\n";
        std::cout << optsMsg.str();
#endif

        while ( 1 ) {
#ifdef _DEBUG
            std::ostringstream paramMsg;
            paramMsg << "Pass: " << it << "\t";
            paramMsg << "p_toFit: ";
            for (std::size_t n_p=0; n_p < p_toFit.size(); ++n_p)
                paramMsg << p_toFit[n_p] << "\t";
            paramMsg << "\n";
            std::cout << paramMsg.str().c_str();
#endif

            if ( !fitFunc.hasJac ) {
                if ( !constrained ) {
                    dlevmar_dif( c_func_lour, &p_toFit[0], &data_ptr[0], n_fitted, 
                            (int)data.size(), (int)opts[4], &opts_l[0], info_id,
                            NULL, NULL, &fInfo );
                } else {
                    dlevmar_bc_dif( c_func_lour, &p_toFit[0], &data_ptr[0], n_fitted, 
                            (int)data.size(), &constrains_lm_lb[0], &constrains_lm_ub[0], NULL,
                            (int)opts[4], &opts_l[0], info_id, NULL, NULL, &fInfo );
                }
            } else {
                if ( !constrained ) {
                    dlevmar_der( c_func_lour, c_jac_lour, &p_toFit[0], &data_ptr[0], 
                            n_fitted, (int)data.size(), (int)opts[4], &opts_l[0], info_id,
                            NULL, NULL, &fInfo );                
                } else {
                    dlevmar_bc_der( c_func_lour,  c_jac_lour, &p_toFit[0], 
                            &data_ptr[0], n_fitted, (int)data.size(), &constrains_lm_lb[0], 
                            &constrains_lm_ub[0], NULL, (int)opts[4], &opts_l[0], info_id,
                            NULL, NULL, &fInfo );
                }
            }
            it++;
            if ( info_id[1] != info_id[1] ) {
                // restore previous parameters if new chisqr is NaN:
                p_toFit = old_p_toFit;
            } else {
                double dchisqr = (info_id[0] - info_id[1]) / info_id[1]; // (old chisqr - new chisqr) / new_chisqr
            
                if ( dchisqr < 0 ) {
                    // restore previous results and exit if new chisqr is larger:
                    for ( int n_i = 0; n_i < LM_INFO_SZ; ++n_i )  info_id[n_i] = old_info_id[n_i];
                    p_toFit = old_p_toFit;
                    break;
                }
                if ( dchisqr < 1e-5 ) {
                    // Keep current results and exit if change in chisqr is below threshold
                    break;
                }
                // otherwise, store results and continue iterating:
                for ( int n_i = 0; n_i < LM_INFO_SZ; ++n_i ) old_info_id[n_i] = info_id[n_i];
                old_p_toFit = p_toFit;
            }
            if ( it >= opts[5] )
                // Exit if maximal number of iterations is reached
                break;
            // decrease initial step size for next iteration:
            opts_l[0] *= 1e-4;
        }
    } else {
        std::runtime_error e("Array of size zero in lmFit");
        throw e;
    }

    // copy back the fitted parameters to p:
    for ( unsigned n_p=0, n_f=0, n_c=0; n_p<fitFunc.pInfo.size(); ++n_p ) {
        if (fitFunc.pInfo[n_p].toFit) {
            p[n_p] = p_toFit[n_f++];
        } else {
            p[n_p] = p_const[n_c++];
        }
        if (can_scale) {
            p[n_p] = fitFunc.pInfo[n_p].unscale(p[n_p], xyscale[0],
                                                xyscale[1], xyscale[2], xyscale[3]);
        }
    }
    
    std::ostringstream str_info;
    str_info << "Passes: " << it;
    str_info << "\nIterations during last pass: " << info_id[5];
    str_info << "\nStopping reason during last pass:";
    switch ((int)info_id[6]) {
     case 1:
         str_info << "\nStopped by small gradient of squared error.";
         warning = 0;
         break;
     case 2:
         str_info << "\nStopped by small rel. parameter change.";
         warning = 0;
         break;
     case 3:
         str_info << "\nReached max. number of iterations. Restart\n"
                  << "with smarter initial parameters and / or with\n"
                  << "increased initial scaling factor and / or with\n"
                  << "increased max. number of iterations.";
         warning = 3;
         break;
     case 4:
         str_info << "\nSingular matrix. Restart from current parameters\n"
                  << "with increased initial scaling factor.";
         warning = 4;
         break;
     case 5:
         str_info << "\nNo further error reduction is possible.\n"
                  << "Restart with increased initial scaling factor.";
         warning = 5;
         break;
     case 6:
         str_info << "\nStopped by small squared error.";
         warning = 0;
         break;
     case 7:
         str_info << "\nStopped by invalid (i.e. NaN or Inf) \"func\" values.\n";
         str_info << "This is a user error.";
         warning = 7;
         break;
     default:
         str_info << "\nUnknown reason for stopping the fit.";
         warning = -1;
    }
    if (use_scaling && !can_scale) {
        str_info << "\nCouldn't use scaling because one or more "
                 << "of the parameters don't allow it.";
    }
    info=str_info.str();
    return info_id[1];
}
Example #13
Vector_double stfio::vec_vec_div(const Vector_double& vec1, const Vector_double& vec2) {
    Vector_double ret_vec(vec1.size());
    std::transform(vec1.begin(), vec1.end(), vec2.begin(), ret_vec.begin(), std::divides<double>());
    return ret_vec;
}
Example #14
Vector_double stfio::vec_scal_div(const Vector_double& vec, double scalar) {
    Vector_double ret_vec(vec.size(), scalar);
    std::transform(vec.begin(), vec.end(), ret_vec.begin(), ret_vec.begin(), std::divides<double>());
    return ret_vec;
}
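For example, vec_scal_div({2.0, 4.0, 6.0}, 2.0) returns {1.0, 2.0, 3.0}: ret_vec is first filled with the scalar so that std::transform with std::divides<double>() computes vec[i] / scalar element-wise and writes the quotient back into ret_vec.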