Example #1
0
 // Copy constructor: duplicates the base DynProgProb state and carries
 // over the probability already lost to the limiting bounds
 // (d_probLost) from the source object.
 inline DynProgProbLim (const DynProgProbLim &dynProgProbLim_)
     : DynProgProb (dynProgProbLim_), 
     d_probLost (dynProgProbLim_.getProbLost ())
 {}
Example #2
0
 // Copies dynProgProbLim_ into *this by delegating to the two-argument
 // copy overload, forwarding the source's accumulated lost probability.
 virtual inline void copy (const DynProgProbLim &dynProgProbLim_)
 {
     copy (dynProgProbLim_, dynProgProbLim_.getProbLost ());
 }
Example #3
0
// Dynamic-programming computation of descending-ladder-epoch statistics
// for a random walk with the given score distribution. All output
// pointers are optional (may be NULL); results are accumulated into the
// non-NULL ones. Assumes the logarithmic regime (negative mean score,
// positive lambda).
//
// Fix(review): *terminated_ was dereferenced without a NULL check,
// unlike every other output pointer in this routine; it is now guarded.
void LocalMaxStatUtil::descendingLadderEpochRepeat (
size_t dimension_, // #(distinct values)          
const Int4 *score_, // values 
const double *prob_, // probability of corresponding value 
double *eSumAlpha_, // expectation (sum [alpha])
double *eOneMinusExpSumAlpha_, // expectation [1.0 - exp (sum [alpha])]
bool isStrict_, // ? is this a strict descending ladder epoch
double lambda_, // lambda for repeats : default is lambda0_ below
size_t endW_, // maximum w plus 1
double *pAlphaW_, // probability {alpha = w} : pAlphaW_ [0, wEnd)
double *eOneMinusExpSumAlphaW_, // expectation [1.0 - exp (sum [alpha]); alpha = w] : eOneMinusExpSumAlphaW_ [0, wEnd)
double lambda0_, // lambda for flattened distribution (avoid recomputation)
double mu0_, // mean of flattened distribution (avoid recomputation)
double muAssoc0_, // mean of associated flattened distribution (avoid recomputation)
double thetaMin0_, // thetaMin of flattened distribution (avoid recomputation)
double rMin0_, // rMin of flattened distribution (avoid recomputation)
double time_, // time limit for the dynamic programming computation (<= 0.0 disables the limit)
bool *terminated_) // out (optional): set true iff the computation hit the time limit
// assumes logarithmic regime
{
    // Start dynamic programming probability calculation using notation in
    //
    // Mott R. and Tribe R. (1999)
    // J. Computational Biology 6(1):91-112
    //
    // Karlin S. and Taylor H.M.(1981)
    // A Second Course in Stochastic Processes, p. 480
    //
    // Note there is an error in Eq (6.19) there, which is corrected in Eq (6.20)
    //
    // This program uses departure into (-Inf, 0] not (-Inf, 0)

    // Avoid recomputation: a 0.0 argument means "not precomputed", so
    // derive the quantity from the score distribution here.
    double mu0 = 0.0 == mu0_ ? mu (dimension_, score_, prob_) : mu0_;
    assert (mu0 < 0.0);
    double lambda0 = 0.0 == lambda0_ ? lambda (dimension_, score_, prob_) : lambda0_;
    assert (0.0 < lambda0);
    if (lambda_ == 0.0) lambda_ = lambda0;
    assert (0.0 < lambda_);
    double muAssoc0 = 0.0 == muAssoc0_ ? muAssoc (dimension_, score_, prob_, lambda0) : muAssoc0_;
    assert (0.0 < muAssoc0);
    double thetaMin0 = 0.0 == thetaMin0_ ? thetaMin (dimension_, score_, prob_, lambda0) : thetaMin0_;
    assert (0.0 < thetaMin0);
    double rMin0 = 0.0 == rMin0_ ? rMin (dimension_, score_, prob_, lambda0, thetaMin0) : rMin0_;
    assert (0.0 < rMin0 && rMin0 < 1.0);

    // Iteration count chosen so the geometrically decaying tail (ratio
    // rMin0) contributes less than REL_TOL; never fewer than endW_ steps.
    const Int4 ITER_MIN = static_cast <Int4> ((log (REL_TOL * (1.0 - rMin0)) / log (rMin0)));
    assert (0 < ITER_MIN);
    const Int4 ITER = static_cast <Int4> (endW_) < ITER_MIN ? ITER_MIN : static_cast <Int4> (endW_);
    assert (0 < ITER);
    // Upper truncation bound for the walk's value lattice.
    const Int4 Y_MAX = static_cast <Int4> (-log (REL_TOL) / lambda0);

    // Strict epochs depart into (-Inf, -1]; non-strict into (-Inf, 0].
    Int4 entry = isStrict_ ? -1 : 0;
    n_setParameters (dimension_, score_, prob_, entry);


    double time0 = 0.0;
    double time1 = 0.0;
    if (time_ > 0.0) Sls::alp_data::get_current_time (time0);

    DynProgProbLim dynProgProb (n_step, dimension_, prob_, score_ [0] - 1, Y_MAX);

    if (pAlphaW_) pAlphaW_ [0] = 0.0;
    if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [0] = 0.0;

    dynProgProb.update (); // iterate random walk

    Int4 value = 0;

    if (eSumAlpha_) *eSumAlpha_ = 0.0;
    if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ = 0.0;

    for (size_t w = 1; w < static_cast <size_t> (ITER); w++) {

        if (w < endW_) { // sum pAlphaW_ [w] and eOneMinusExpSumAlphaW_ [w]

             if (pAlphaW_) pAlphaW_ [w] = 0.0;
             if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] = 0.0;

             // Accumulate over the departure states (-Inf, entry].
             for (value = score_ [0]; value <= entry; value++) {
                if (pAlphaW_) pAlphaW_ [w] += dynProgProb.getProb (value);
                if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] += 
                                               dynProgProb.getProb (value) * 
                                               (1.0 - exp (lambda_ * static_cast <double> (value)));
             }
        }

        for (value = score_ [0]; value <= entry; value++) {
         if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb (value) * static_cast <double> (value);
         if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb (value) * 
                                        (1.0 - exp (lambda_ * static_cast <double> (value)));
        }

        dynProgProb.setValueFct (n_bury); 
        dynProgProb.update (); // put probability into the morgue

        dynProgProb.setValueFct (n_step); 
        dynProgProb.update (); // iterate random walk

        // Optional wall-clock limit: abandon the computation if exceeded.
        if (time_ > 0.0)
        {
            Sls::alp_data::get_current_time (time1);
            if (time1 - time0 > time_) 
            {
                // Guard the optional out-parameter, consistent with the
                // other output pointers in this routine.
                if (terminated_) *terminated_ = true;
                return;
            }
        }

    }

    // Final accumulation after the last iteration of the walk.
    for (value = score_ [0]; value <= entry; value++) {
      if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb (value) * static_cast <double> (value);
      if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb (value) * 
                                     (1.0 - exp (lambda_ * static_cast <double> (value)));
    }

    // check that not too much probability has been omitted
    double prob = 0.0;
    for (value = entry + 1; value < dynProgProb.getValueUpper (); value++) {
      prob += dynProgProb.getProb (value);
    }
    prob += dynProgProb.getProbLost ();

    const double FUDGE = 2.0;
    assert (prob <= FUDGE * static_cast <double> (dimension_) * REL_TOL);
}