Example #1
void llvm_model::set_prediction(const ObsId & obs_id, boost::ptr_vector<theta::Function> & coeffs_, boost::ptr_vector<HistogramFunction> & histos_){
    observables.insert(obs_id);
    if(coeffs_.size() != histos_.size()) throw invalid_argument("llvm_model::set_prediction: number of histograms and coefficients do not match");
    if(histos[obs_id].size() > 0 || coeffs[obs_id].size() > 0)
        throw invalid_argument("llvm_model::set_prediction: prediction already set for this observable");
    coeffs[obs_id].transfer(coeffs[obs_id].end(), coeffs_.begin(), coeffs_.end(), coeffs_);
    histos[obs_id].transfer(histos[obs_id].end(), histos_.begin(), histos_.end(), histos_);
    for(boost::ptr_vector<theta::Function>::const_iterator it=coeffs[obs_id].begin(); it!=coeffs[obs_id].end(); ++it){
        ParIds pids = (*it).get_parameters();
        parameters.insert(pids.begin(), pids.end());
    }
    // all histogram functions for one observable must have identical dimensions (nbins, xmin, xmax):
    size_t nbins = 0;
    double xmin = NAN, xmax = NAN;
    bool first = true;
    for(boost::ptr_vector<HistogramFunction>::const_iterator it=histos[obs_id].begin(); it!=histos[obs_id].end(); ++it){
        if(first){
            it->get_histogram_dimensions(nbins, xmin, xmax);
            first = false;
        }
        else{
            size_t nbins_tmp = 0;
            double xmin_tmp = NAN, xmax_tmp = NAN;
            it->get_histogram_dimensions(nbins_tmp, xmin_tmp, xmax_tmp);
            if(nbins!=nbins_tmp || xmin!=xmin_tmp || xmax!=xmax_tmp){
                throw invalid_argument("llvm_model::set_prediction: histogram dimensions mismatch");
            }
        }
        const ParIds & pids = (*it).get_parameters();
        parameters.insert(pids.begin(), pids.end());
    }
}
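The two transfer calls above rely on boost::ptr_vector ownership transfer: the pointed-to objects are moved, not copied, and the source container is left empty afterwards. A minimal self-contained illustration of that semantics (not part of theta):

#include <boost/ptr_container/ptr_vector.hpp>
#include <cassert>

int main(){
    // boost::ptr_vector::transfer moves the pointers (and their ownership)
    // from 'src' to the end of 'dst' without copying the pointed-to objects:
    boost::ptr_vector<int> src, dst;
    src.push_back(new int(1));
    src.push_back(new int(2));
    dst.transfer(dst.end(), src.begin(), src.end(), src);
    assert(src.empty() && dst.size() == 2);
    return 0;
}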
Example #2
FunctionInfo::FunctionInfo(const ParValues & start_, const ParValues & step_, const Ranges & ranges_, const ParValues & fixed_parameters): start(start_), step(step_), ranges(ranges_){
    ParIds pids = start.get_parameters();
    for(ParIds::const_iterator pit=pids.begin(), it_end = pids.end(); pit!=it_end; ++pit){
        if(!step.contains(*pit)) throw invalid_argument("FunctionInfo: step does not contain all parameters from start");
        // 1. check whether parameter is fixed by step and range:
        const pair<double, double> & r = ranges.get(*pit);
        if(r.first==r.second){
            if(step.get_unchecked(*pit)>0.0){
                throw invalid_argument("FunctionInfo: inconsistent range/step given: range empty but step > 0");
            }
            fixed_parids.insert(*pit);
        }
        else{
            if(step.get_unchecked(*pit)<=0.0){
                throw invalid_argument("FunctionInfo: step <= 0.0 for non-empty range given");
            }
        }
        // 2. check whether the parameter is fixed by fixed_parameters. Note that
        //  the value given in fixed_parameters overrides the value from start.
        if(fixed_parameters.contains(*pit)){
            double val = fixed_parameters.get(*pit);
            start.set(*pit, val);
            ranges.set(*pit, make_pair(val, val));
            step.set(*pit, 0.0);
            fixed_parids.insert(*pit);
        }
    }
}
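As a side note, the range/step consistency rule enforced in step 1 above can be summarized by a small stand-alone predicate; the helper below is a hypothetical illustration, not part of theta:

#include <stdexcept>
#include <utility>

// Hypothetical helper mirroring the rule above: an empty range (low == high)
// requires step == 0 and marks the parameter as fixed; a non-empty range
// requires a strictly positive step.
inline bool is_fixed_by_range_and_step(const std::pair<double, double> & range, double step){
    if(range.first == range.second){
        if(step > 0.0) throw std::invalid_argument("inconsistent range/step: range empty but step > 0");
        return true;
    }
    if(step <= 0.0) throw std::invalid_argument("step <= 0 for non-empty range");
    return false;
}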
Example #3
void theta::fill_mode_support(theta::ParValues & mode,
                std::map<theta::ParId, std::pair<double, double> > & support, const theta::Distribution & d){
    ParIds pids = d.get_parameters();
    d.mode(mode);
    theta_assert(mode.contains_all(pids));
    for(ParIds::const_iterator p_it=pids.begin(); p_it!=pids.end(); ++p_it){
        support[*p_it] = d.support(*p_it);
    }
}
Example #4
boost::shared_ptr<FunctionInfo> Minimizer::create_nll_function_info(const Model & model, const boost::shared_ptr<Distribution> & override_parameter_distribution, const ParValues & fixed_parameters){
    const Distribution & dist = override_parameter_distribution.get()? *override_parameter_distribution: model.get_parameter_distribution();
    ParValues start;
    dist.mode(start);
    Ranges ranges(dist);
    ParIds pids = fixed_parameters.get_parameters();
    for(ParIds::const_iterator pit = pids.begin(); pit!=pids.end(); ++pit){
        double val = fixed_parameters.get(*pit);
        start.set(*pit, val);
        ranges.set(*pit, make_pair(val, val));
    }
    ParValues step = asimov_likelihood_widths(model, override_parameter_distribution);
    return boost::shared_ptr<FunctionInfo>(new DefFunctionInfo(start, step, ranges, fixed_parameters));
}
Example #5
MinimizationResult Minimizer::minimize2(const Function & f, const FunctionInfo & info, const ParValues & fixed_parameters){
    dynamic_cast<const DefFunctionInfo&>(info); // ensure info was created by create_nll_function_info; throws std::bad_cast otherwise
    ParIds pids = fixed_parameters.get_parameters();
    if(pids.size()==0){
        return minimize(f, info.get_start(), info.get_step(), info.get_ranges());
    }
    else{
        ParValues start(info.get_start());
        ParValues step(info.get_step());
        Ranges ranges(info.get_ranges());
        const ParIds & info_fixed = info.get_fixed_parameters();
        for(ParIds::const_iterator pit = pids.begin(); pit!=pids.end(); ++pit){
            if(!info_fixed.contains(*pit)){
                throw invalid_argument("fixed parameter in minimize2 which is not fixed in info. This is not allowed.");
            }
            double val = fixed_parameters.get(*pit);
            start.set(*pit, val);
            step.set(*pit, 0.0);
            ranges.set(*pit, make_pair(val, val));
        }
        return minimize(f, start, step, ranges);
    }
}
Example #6
theta::ParValues asimov_likelihood_widths(const theta::Model & model, const boost::shared_ptr<Distribution> & override_parameter_distribution){
    const Distribution & dist = override_parameter_distribution.get()? *override_parameter_distribution: model.get_parameter_distribution();
    ParIds parameters = model.getParameters();
    ParValues mode;
    dist.mode(mode);
    Data asimov_data;
    model.get_prediction(asimov_data, mode);
    std::auto_ptr<NLLikelihood> nll = model.getNLLikelihood(asimov_data);
    //a null override distribution has the same semantics for NLLikelihood (use the model's own parameter distribution):
    nll->set_override_distribution(override_parameter_distribution);
    double nll_at_min = (*nll)(mode);
    ParValues result;
    for(ParIds::const_iterator it=parameters.begin(); it!=parameters.end(); ++it){
        ParId pid = *it;
        const double pid_mode = mode.get(pid);
        std::pair<double, double> support = dist.support(pid);
        assert(support.first <= pid_mode && pid_mode <= support.second);
        if(support.first == support.second){
            result.set(pid, 0.0);
            continue;
        }
        nll_mode_pid f(mode, pid, *nll, nll_at_min + 0.5);
        //if one end of the support is finite, try to use it directly. Record whether an interval end is
        // "fl0", i.e. the end itself is finite but the function value there is invalid (infinite or < 0).
        bool low_is_fl0 = false, high_is_fl0 = false;
        if(std::isfinite(support.second)){
            double f2 = f(support.second);
            if(f2==0.0){
                result.set(pid, fabs(pid_mode - support.second));
                continue;
            }
            if(!std::isfinite(f2) || f2 < 0){
               high_is_fl0 = true;
            }
            else{
               result.set(pid, fabs(pid_mode - secant(pid_mode, support.second, 0.0, -0.5, f2, 0.05, f)));
               continue;
            }
        }
        if(std::isfinite(support.first)){
            double f2 = f(support.first);
            if(f2==0.0){
                result.set(pid, fabs(pid_mode - support.first));
                continue;
            }
            if(!std::isfinite(f2) || f2 < 0){
               low_is_fl0 = true;
            }
            else{
               result.set(pid, fabs(pid_mode - secant(support.first, pid_mode, 0.0, f2, -0.5, 0.05, f)));
               continue;
            }
        }
        //at this point, each end of the support is either infinite, or finite with an invalid function value there.
        // Treat the latter case first:
        if(low_is_fl0 && high_is_fl0){
            result.set(pid, support.second - support.first);
            continue;
        }
        //Now, one of the interval ends has to be infinite, otherwise we would not be here.
        //Scan in that direction:
        assert(std::isinf(support.first) || std::isinf(support.second));
        bool found = false;
        for(double sign = -1.0; sign <= 1.001; sign+=2.0){
            if(!std::isinf(support.first) && sign < 0) continue;
            if(!std::isinf(support.second) && sign > 0) continue;
            // as step size, try the parameter value, if it is not zero:
            double step = fabs(pid_mode);
            if(step==0) step = 1.0;
            for(int i=0; i<1000; ++i){
                const double x = pid_mode + sign * step;
                double fval = f(x);
                if(std::isinf(fval)){
                    step /= 1.5;
                    continue;
                }
                if(fval > 0){
                    // bracket the crossing between the mode (f = -0.5) and x (f = fval > 0):
                    double xlow = pid_mode, flow = -0.5;
                    double xhigh = x, fhigh = fval;
                    if(sign < 0){
                        std::swap(xlow, xhigh);
                        std::swap(flow, fhigh);
                    }
                    assert(xlow <= xhigh);
                    result.set(pid, fabs(pid_mode - secant(xlow, xhigh, 0.0, flow, fhigh, 0.05, f)));
                    found = true;
                    break;
                }
                step *= 2.0;
            }
            if(found) break;
        }
        if(found) continue;
        stringstream ss;
        ss << "asimov_likelihood_widths: could not find width for parameter " << pid;
        throw Exception(ss.str());
    }
    return result;
}
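Two remarks on the search above. First, the target nll_at_min + 0.5 implements the usual one-sigma definition: for a Gaussian likelihood, the negative log-likelihood rises by exactly 0.5 one standard deviation away from its minimum, so the returned width is that standard deviation. Second, the secant helper is not shown here; from the call sites, its assumed contract is: given a bracket [xlow, xhigh] with f(xlow) = flow below the target and f(xhigh) = fhigh above it, return a point inside the bracket where f crosses the target, with the second-to-last argument taken as the absolute accuracy in x. A minimal sketch under those assumptions (secant_sketch is a hypothetical stand-in, not the theta implementation):

// Hypothetical stand-in for the secant() helper used above, assuming
// flow = f(xlow) < target < fhigh = f(xhigh); xtol is the absolute accuracy in x.
template <typename F>
double secant_sketch(double xlow, double xhigh, double target,
                     double flow, double fhigh, double xtol, const F & f){
    double x = 0.5 * (xlow + xhigh);
    for(int i = 0; xhigh - xlow > xtol && i < 200; ++i){
        // alternate a secant (regula falsi) step with a bisection step; the
        // bisection step guarantees that the bracket width actually shrinks.
        if(i % 2 == 0) x = xlow + (target - flow) * (xhigh - xlow) / (fhigh - flow);
        else x = 0.5 * (xlow + xhigh);
        const double fx = f(x);
        if(fx == target) break;
        if(fx < target){ xlow = x; flow = fx; }
        else{ xhigh = x; fhigh = fx; }
    }
    return x;
}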
Example #7
MinimizationResult root_minuit::minimize(const theta::Function & f, const theta::ParValues & start,
        const theta::ParValues & steps, const std::map<theta::ParId, std::pair<double, double> > & ranges){
    //I would like to re-use min. However, it horribly fails after very few uses with
    // unsigned int ROOT::Minuit2::MnUserTransformation::IntOfExt(unsigned int) const: Assertion `!fParameters[ext].IsFixed()' failed.
    // when calling SetFixedVariable(...).
    //Using a "new" one every time seems very wastefull, but it seems to work ...
    std::auto_ptr<ROOT::Minuit2::Minuit2Minimizer> min(new ROOT::Minuit2::Minuit2Minimizer(type));
    //min->SetPrintLevel(0);
    if(max_function_calls > 0) min->SetMaxFunctionCalls(max_function_calls);
    if(max_iterations > 0) min->SetMaxIterations(max_iterations);
    MinimizationResult result;

    //1. setup parameters, limits and initial step sizes
    ParIds parameters = f.get_parameters();
    int ivar=0;
    for(ParIds::const_iterator it=parameters.begin(); it!=parameters.end(); ++it, ++ivar){
        std::map<theta::ParId, std::pair<double, double> >::const_iterator r_it = ranges.find(*it);
        if(r_it==ranges.end()) throw invalid_argument("root_minuit::minimize: range not set for a parameter");
        pair<double, double> range = r_it->second;
        double def = start.get(*it);
        double step = steps.get(*it);
        stringstream ss;
        ss << "par" << ivar;
        string name = ss.str();
        //do not use the ranges directly, but a slightly narrower range (by one permille of the respective border),
        // so that the numerical evaluation of the derivative at the boundaries does not step outside
        // these boundaries ...
        if(step == 0.0){
            min->SetFixedVariable(ivar, name, def);
        }
        else if(isinf(range.first)){
            if(isinf(range.second)){
                min->SetVariable(ivar, name, def, step);
            }
            else{
                min->SetUpperLimitedVariable(ivar, name, def, step, range.second - fabs(range.second) * 0.001);
            }
        }
        else{
            if(isinf(range.second)){
                min->SetLowerLimitedVariable(ivar, name, def, step, range.first + fabs(range.first) * 0.001);
            }
            else{ // both ends are finite
                if(range.first==range.second){
                    min->SetFixedVariable(ivar, name, range.first);
                }
                else{
                    min->SetLimitedVariable(ivar, name, def, step, range.first + fabs(range.first) * 0.001, range.second - fabs(range.second) * 0.001);
                }
            }
        }
    }

    //2. setup the function
    RootMinuitFunctionAdapter minuit_f(f);
    min->SetFunction(minuit_f);

    //3. setup tolerance
    min->SetTolerance(tolerance_factor * min->Tolerance());
    //3.a. error definition: SetErrorDef sets the function increase that defines the parameter errors;
    // 0.5 is the conventional value when minimizing a negative log-likelihood (1.0 for a chi-square).
    min->SetErrorDef(0.5);
    
    //4. minimize. In case of failure, try harder. Discard all output generated in min->Minimize.
    bool success;
    {
        theta::utils::discard_output d_o(true);
        success = min->Minimize();
        if(!success){
            for(int i=1; i<=n_retries; i++){
                success = min->Minimize();
                if(success) break;
            }
        }
    } // d_o is destroyed, output resumed.

    //5. do error handling
    if(not success){
        int status = min->Status();
        int status_1 = status % 10;
        //int status_2 = status / 10;
        stringstream s;
        s << "MINUIT returned status " << status;
        switch(status_1){
            case 1: s << " (Covariance was made pos defined)"; break;
            case 2: s << " (Hesse is invalid)"; break;
            case 3: s << " (Edm is above max)"; break;
            case 4: s << " (Reached call limit)"; break;
            case 5: s << " (Some other failure)"; break;
            default:
                s << " [unexpected status code]";
        }
        throw MinimizationException(s.str());
    }

    //6. convert result
    result.fval = min->MinValue();
    ivar = 0;
    const double * x = min->X();
    const double * errors = 0;
    bool have_errors = min->ProvidesError();
    if(have_errors) errors = min->Errors();
    for(ParIds::const_iterator it=parameters.begin(); it!=parameters.end(); ++it, ++ivar){
        result.values.set(*it, x[ivar]);
        if(have_errors){
            result.errors_plus.set(*it, errors[ivar]);
            result.errors_minus.set(*it, errors[ivar]);
        }
        else{
            result.errors_plus.set(*it, -1);
            result.errors_minus.set(*it, -1);
        }
    }
    result.covariance.reset(parameters.size(), parameters.size());
    //I would use min->CovMatrixStatus here to check the validity of the covariance matrix,
    // if only it were documented ...
    if(min->ProvidesError()){
        for(size_t i=0; i<parameters.size(); ++i){
            for(size_t j=0; j<parameters.size(); ++j){
                result.covariance(i,j) = min->CovMatrix(i,j);
            }
        }
    }
    else{
        for(size_t i=0; i<parameters.size(); ++i){
            result.covariance(i,i) = -1;
        }
    }
    return result;
}
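For completeness, a stand-alone sketch of the same ROOT::Minuit2::Minuit2Minimizer calls on a toy two-parameter function (assumes a ROOT build with Minuit2; the function quad and the variable names are placeholders, not part of theta):

#include "Minuit2/Minuit2Minimizer.h"
#include "Math/Functor.h"
#include <iostream>

// toy objective with its minimum (value 0) at (1, -2):
static double quad(const double * x){
    const double dx = x[0] - 1.0, dy = x[1] + 2.0;
    return 0.5 * (dx * dx + dy * dy);
}

int main(){
    ROOT::Minuit2::Minuit2Minimizer min; // default algorithm: Migrad
    ROOT::Math::Functor f(&quad, 2);
    min.SetFunction(f);
    min.SetVariable(0, "par0", 0.0, 0.1);                     // free parameter
    min.SetLimitedVariable(1, "par1", 0.0, 0.1, -10.0, 10.0); // parameter with a finite range
    min.SetErrorDef(0.5); // as above: error definition appropriate for a negative log-likelihood
    if(!min.Minimize()){
        std::cerr << "minimization failed, status " << min.Status() << std::endl;
        return 1;
    }
    const double * x = min.X();
    std::cout << "minimum at (" << x[0] << ", " << x[1] << "), fval = " << min.MinValue() << std::endl;
    return 0;
}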