template <class PathType> inline
    void LongstaffSchwartzPathPricer<PathType>::calibrate() {
        const Size n = paths_.size();
        Array prices(n), exercise(n);
        const Size len = EarlyExerciseTraits<PathType>::pathLength(paths_[0]);

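        // initialise the path values with the payoff at the last exercise
        // date; the continuation value at expiry is zero by definition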
        for (Size i=0; i<n; ++i)
            prices[i] = (*pathPricer_)(paths_[i], len-1);

        std::vector<Real>      y;
        std::vector<StateType> x;
        for (Size i=len-2; i>0; --i) {
            y.clear();
            x.clear();

            //roll back step
            for (Size j=0; j<n; ++j) {
                exercise[j]=(*pathPricer_)(paths_[j], i);

                if (exercise[j]>0.0) {
                    x.push_back(pathPricer_->state(paths_[j], i));
                    y.push_back(dF_[i]*prices[j]);
                }
            }

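            // regress the discounted continuation values y against the basis
            // functions v_ evaluated at the ITM states x, i.e. approximate the
            // continuation value as C(x) ~ sum_l coeff_[i][l] * v_l(x)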
            if (v_.size() <=  x.size()) {
                coeff_[i] = GeneralLinearLeastSquares(x, y, v_).coefficients();
            }
            else {
            // if the number of ITM paths is smaller than the number of
            // calibration functions, exercise early whenever exerciseValue > 0
                coeff_[i] = Array(v_.size(), 0.0);
            }

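            // exercise decision: discount the path values one step and, on
            // each ITM path, exercise if the immediate exercise value exceeds
            // the regressed continuation value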
            for (Size j=0, k=0; j<n; ++j) {
                prices[j]*=dF_[i];
                if (exercise[j]>0.0) {
                    Real continuationValue = 0.0;
                    for (Size l=0; l<v_.size(); ++l) {
                        continuationValue += coeff_[i][l] * v_[l](x[k]);
                    }
                    if (continuationValue < exercise[j]) {
                        prices[j] = exercise[j];
                    }
                    ++k;
                }
            }
        }

        // remove calibration paths and release memory
        std::vector<PathType> empty;
        paths_.swap(empty);
        // entering the calculation phase
        calibrationPhase_ = false;
    }
void LongstaffSchwartzProxyPathPricer::post_processing(
    const Size i, const std::vector<StateType> &state,
    const std::vector<Real> &price, const std::vector<Real> &exercise) {

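    // split the calibration paths into ITM and OTM sets and fit a separate
    // regression on each; cutoff_ records the largest state value seen
    // among the OTM paths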
    std::vector<StateType> x_itm, x_otm;
    std::vector<Real> y_itm, y_otm;

    cutoff_ = -QL_MAX_REAL;

    for (Size j = 0; j < state.size(); ++j) {
        if (exercise[j] > 0.0) {
            x_itm.push_back(state[j]);
            y_itm.push_back(price[j]);
        } else {
            x_otm.push_back(state[j]);
            y_otm.push_back(price[j]);
            if(state[j]>cutoff_)
                cutoff_ = state[j];
        }
    }

    if (v_.size() <= x_itm.size()) {
        coeffItm_[i - 1] =
            GeneralLinearLeastSquares(x_itm, y_itm, v_).coefficients();
    } else {
        // see the comment in LongstaffSchwartzPathPricer::calibrate()
        coeffItm_[i - 1] = Array(v_.size(), 0.0);
    }

    if (v_.size() <= x_otm.size()) {
        coeffOtm_[i - 1] =
            GeneralLinearLeastSquares(x_otm, y_otm, v_).coefficients();
    } else {
        // see the comment in LongstaffSchwartzPathPricer::calibrate()
        coeffOtm_[i - 1] = Array(v_.size(), 0.0);
    }
}
    template <class PathType> inline
    void LongstaffSchwartzPathPricer<PathType>::calibrate() {
#ifdef _OPENMP
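        // merge the per-thread path containers gathered during calibration
        // into the single paths_ vector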
        for (Size i = 0; i < pathsMt_.size(); ++i) {
            paths_.insert(paths_.end(), pathsMt_[i].begin(), pathsMt_[i].end());
        }
#endif
        const Size n = paths_.size();
        Array prices(n), exercise(n);
        std::vector<StateType> p_state(n);
        std::vector<Real> p_price(n), p_exercise(n);

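        // initialise with the payoff at the last exercise date and record the
        // state/price/exercise triple for the post_processing hook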
        for (Size i=0; i<n; ++i) {
            p_state[i] = pathPricer_->state(paths_[i],len_-1);
            prices[i] = p_price[i] = (*pathPricer_)(paths_[i], len_-1);
            p_exercise[i] = prices[i];
        }

        post_processing(len_ - 1, p_state, p_price, p_exercise);

        std::vector<Real>      y;
        std::vector<StateType> x;
        for (Size i=len_-2; i>0; --i) {
            y.clear();
            x.clear();

            //roll back step
            for (Size j=0; j<n; ++j) {
                exercise[j]=(*pathPricer_)(paths_[j], i);
                if (exercise[j]>0.0) {
                    x.push_back(pathPricer_->state(paths_[j], i));
                    y.push_back(dF_[i]*prices[j]);
                }
            }

            if (v_.size() <=  x.size()) {
                coeff_[i-1] = GeneralLinearLeastSquares(x, y, v_).coefficients();
            }
            else {
            // if the number of ITM paths is smaller than the number of
            // calibration functions, exercise early whenever exerciseValue > 0
                coeff_[i-1] = Array(v_.size(), 0.0);
            }

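            // exercise decision as in the rollback above, additionally
            // recording state, price and exercise value for post_processing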
            for (Size j=0, k=0; j<n; ++j) {
                prices[j]*=dF_[i];
                if (exercise[j]>0.0) {
                    Real continuationValue = 0.0;
                    for (Size l=0; l<v_.size(); ++l) {
                        continuationValue += coeff_[i-1][l] * v_[l](x[k]);
                    }
                    if (continuationValue < exercise[j]) {
                        prices[j] = exercise[j];
                    }
                    ++k;
                }
                p_state[j] = pathPricer_->state(paths_[j],i);
                p_price[j] = prices[j];
                p_exercise[j] = exercise[j];
            }

            post_processing(i, p_state, p_price, p_exercise);
        }

        // remove calibration paths and release memory
        std::vector<PathType> empty;
        paths_.swap(empty);
        // entering the calculation phase
        calibrationPhase_ = false;
    }
    void LongstaffSchwartzMultiPathPricer::calibrate() {
        const Size n = paths_.size(); // number of paths
        Array prices(n, 0.0), exercise(n, 0.0);

        const Size basisDimension = payoff_->basisSystemDimension();

        const Size len = paths_[0].pathLength();

        /*
          We try to estimate a lower bound for the continuation value,
          so that only ITM paths contribute to the regression.
         */

        for (Size j = 0; j < n; ++j) {
            const Real payoff = paths_[j].payments[len - 1];
            const Real exercise = paths_[j].exercises[len - 1];
            const Array & states = paths_[j].states[len - 1];
            const bool canExercise = !states.empty();

            // at the end the continuation value is 0.0
            if (canExercise && exercise > 0.0)
                prices[j] += exercise;
            prices[j] += payoff;
        }

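        // the smallest path value at the last date is used as a lower bound
        // for the continuation value when selecting regression paths below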
        lowerBounds_[len - 1] = *std::min_element(prices.begin(), prices.end());

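        // records, for each path, whether the regression-based rule says to
        // exercise at the current time step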
        std::vector<bool> lsExercise(n);

        for (Integer i = len - 2; i >= 0; --i) {
            std::vector<Real>  y;
            std::vector<Array> x;

            // prices are discounted up to time i
            const Real discountRatio = dF_[i + 1] / dF_[i];
            prices *= discountRatio;
            lowerBounds_[i + 1] *= discountRatio;

            //roll back step
            for (Size j = 0; j < n; ++j) {
                exercise[j] = paths_[j].exercises[i];

                // if states is empty there is no exercise opportunity on this
                // path, and the path does not participate in the least-squares
                // regression

                const Array & states = paths_[j].states[i];
                QL_REQUIRE(states.empty() || states.size() == basisDimension, 
                           "Invalid size of basis system");

                // only paths that could potentially create exercise
                // opportunities participate in the regression

                // if the exercise value is below the minimum continuation
                // value there is no point in considering the path
                if (!states.empty() && exercise[j] > lowerBounds_[i + 1]) {
                    x.push_back(states);
                    y.push_back(prices[j]);
                }
            }

            if (v_.size() <=  x.size()) {
                coeff_[i] = GeneralLinearLeastSquares(x, y, v_).coefficients();
            }
            else {
            // if the number of ITM paths is smaller than the number of
            // calibration functions -> never exercise
                QL_TRACE("Not enough itm paths: default decision is NEVER");
                coeff_[i] = Array(0);
            }

            /* Attempt to avoid the static arbitrage implied by always or
               never exercising.

               "Always" is absolute: it ignores the lower bound on the
               continuation value (this could be changed), but it still
               honours "canExercise".
             */
            Real sumOptimized = 0.0;
            Real sumNoExercise = 0.0;
            Real sumAlwaysExercise = 0.0; // always, if allowed

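            // evaluate the three candidate policies path by path: the
            // LS-optimised rule, never exercising, and always exercising
            // where exercise is allowed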
            for (Size j = 0, k = 0; j < n; ++j) {
                sumNoExercise += prices[j];
                lsExercise[j] = false;

                const bool canExercise = !paths_[j].states[i].empty();
                if (canExercise) {
                    sumAlwaysExercise += exercise[j];
                    if (!coeff_[i].empty() && exercise[j] > lowerBounds_[i + 1]) {
                        Real continuationValue = 0.0;
                        for (Size l = 0; l < v_.size(); ++l) {
                            continuationValue += coeff_[i][l] * v_[l](x[k]);
                        }
                        
                        if (continuationValue < exercise[j]) {
                            lsExercise[j] = true;
                        }
                        ++k;
                    }
                }
                else {
                    sumAlwaysExercise += prices[j];
                }

                sumOptimized += lsExercise[j] ? exercise[j] : prices[j];
            }

            sumOptimized /= n;
            sumNoExercise /= n;
            sumAlwaysExercise /= n;

            QL_TRACE(   "Time index: " << i 
                     << ", LowerBound: " << lowerBounds_[i + 1] 
                     << ", Optimum: " << sumOptimized 
                     << ", Continuation: " << sumNoExercise 
                     << ", Termination: " << sumAlwaysExercise);

            if (  sumOptimized >= sumNoExercise 
                && sumOptimized >= sumAlwaysExercise) {
                
                QL_TRACE("Accepted LS decision");
                for (Size j = 0; j < n; ++j) {
                    // lsExercise already contains "canExercise"
                    prices[j] = lsExercise[j] ? exercise[j] : prices[j];
                }
            }
            else if (sumAlwaysExercise > sumNoExercise) {
                QL_TRACE("Overridden bad LS decision: ALWAYS");
                for (Size j = 0; j < n; ++j) {
                    const bool canExercise = !paths_[j].states[i].empty();
                    prices[j] = canExercise ? exercise[j] : prices[j];
                }
                // special value to indicate always exercise
                coeff_[i] = Array(v_.size() + 1); 
            }
            else {
                QL_TRACE("Overridden bad LS decision: NEVER");
                // prices already contain the continuation value
                // special value to indicate never exercise
                coeff_[i] = Array(0); 
            }

            // then we add in any case the payment at time t
            // which is made even if cancellation happens at t
            for (Size j = 0; j < n; ++j) {
                const Real payoff = paths_[j].payments[i];
                prices[j] += payoff;
            }

            lowerBounds_[i] = *std::min_element(prices.begin(), prices.end());
        }

        // remove calibration paths
        paths_.clear();
        // entering the calculation phase
        calibrationPhase_ = false;
    }