Example #1
std::vector<double>
CostFuncLeastSquares::getFitWeights(API::FunctionValues_sptr values) const {
  std::vector<double> weights(values->size());
  for (size_t i = 0; i < weights.size(); ++i) {
    weights[i] = values->getFitWeight(i);
  }

  return weights;
}
Example #2
/**
 * Calculate the value of a least squares cost function
 * @param leastSquares :: The least squares cost func to calculate the value for
 */
void SeqDomain::leastSquaresVal(const CostFuncLeastSquares& leastSquares)
{
  API::FunctionDomain_sptr domain;
  API::FunctionValues_sptr values;
  const size_t n = getNDomains();
  for(size_t i = 0; i < n; ++i)
  {
    values.reset();
    getDomainAndValues( i, domain, values );
    if (!values)
    {
      throw std::runtime_error("LeastSquares: undefined FunctionValues.");
    }
    leastSquares.addVal( domain, values );
  }
}
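This sequential accumulation pattern, which the Rwp variants below repeat, can be sketched in a self-contained form as follows. ToyDomain, ToyValues and accumulateCost are hypothetical stand-ins used only for illustration, not part of the Mantid API:

#include <functional>
#include <stdexcept>
#include <vector>

// Toy stand-ins for FunctionDomain/FunctionValues: each "domain" here is just
// a block of x-values with the matching observed data and weights.
struct ToyDomain { std::vector<double> x; };
struct ToyValues { std::vector<double> data, weights; };

// Accumulate a cost over several sub-domains, mirroring the shape of
// SeqDomain::leastSquaresVal: fetch each (domain, values) pair, validate it,
// and let the supplied callback add that block's contribution.
double accumulateCost(
    const std::vector<ToyDomain> &domains, const std::vector<ToyValues> &values,
    const std::function<double(const ToyDomain &, const ToyValues &)> &addVal) {
  if (domains.size() != values.size())
    throw std::runtime_error("domain/values size mismatch.");
  double total = 0.0;
  for (size_t i = 0; i < domains.size(); ++i) {
    if (values[i].data.empty())
      throw std::runtime_error("undefined FunctionValues."); // same guard as above
    total += addVal(domains[i], values[i]); // analogue of leastSquares.addVal(domain, values)
  }
  return total;
}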
Example #3
/**
 * Calculate the value, first and second derivatives of a RWP cost function
 * @param rwp :: The rwp cost func to calculate the value for
 * @param evalFunction :: Flag to evaluate the value of the cost function
 * @param evalDeriv :: Flag to evaluate the first derivatives
 * @param evalHessian :: Flag to evaluate the Hessian (second derivatives)
 */
void SeqDomain::rwpValDerivHessian(const CostFuncRwp& rwp, bool evalFunction, bool evalDeriv, bool evalHessian)
{
  API::FunctionDomain_sptr domain;
  API::FunctionValues_sptr values;
  const size_t n = getNDomains();
  for(size_t i = 0; i < n; ++i)
  {
    values.reset();
    getDomainAndValues( i, domain, values );
    if (!values)
    {
      throw std::runtime_error("Rwp: undefined FunctionValues.");
    }
    rwp.addValDerivHessian(rwp.getFittingFunction(),domain,values,evalFunction,evalDeriv,evalHessian);
  }
}
Example #4
/**
 * Calculate the value of an Rwp cost function
 * @param rwp :: The RWP cost func to calculate the value for
 */
void SeqDomain::rwpVal(const CostFuncRwp& rwp)
{
  API::FunctionDomain_sptr domain;
  API::FunctionValues_sptr values;
  const size_t n = getNDomains();
  for(size_t i = 0; i < n; ++i)
  {
    values.reset();
    getDomainAndValues( i, domain, values );
    if (!values)
    {
      throw std::runtime_error("Rwp: undefined FunctionValues.");
    }
    rwp.addVal( domain, values );
  }
}
Example #5
/** Set fitting function, domain it will operate on, and container for values.
 * @param function :: The fitting function.
 * @param domain :: The domain for the function.
 * @param values :: The FunctionValues object which receives the calculated values
 *   and also contains the data to fit to and the fitting weights (reciprocal errors).
 */
void CostFuncFitting::setFittingFunction(API::IFunction_sptr function,
                                         API::FunctionDomain_sptr domain,
                                         API::FunctionValues_sptr values) {
  if (domain->size() != values->size()) {
    throw std::runtime_error(
        "Function domain and values objects are incompatible.");
  }
  m_function = function;
  m_domain = domain;
  m_values = values;
  m_indexMap.clear();
  for (size_t i = 0; i < m_function->nParams(); ++i) {
    if (m_function->isActive(i)) {
      m_indexMap.push_back(i);
    }
    API::IConstraint *c = m_function->getConstraint(i);
    if (c) {
      c->setParamToSatisfyConstraint();
    }
  }
}
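The loop above builds m_indexMap, which maps active-parameter indices (what the minimizer varies) to declared-parameter indices (what the function stores), skipping fixed or tied parameters. A small illustrative sketch of that mapping, with a hypothetical ToyFunction in place of API::IFunction:

#include <vector>

// Hypothetical ToyFunction mimicking the bits of API::IFunction used here:
// a set of parameters, some of which are "active" (varied by the minimizer).
struct ToyFunction {
  std::vector<bool> active;
  size_t nParams() const { return active.size(); }
  bool isActive(size_t i) const { return active[i]; }
};

// Build the active-index map: position k in the result is the declared index
// of the k-th active parameter, so fixed/tied parameters are skipped.
std::vector<size_t> buildIndexMap(const ToyFunction &f) {
  std::vector<size_t> indexMap;
  for (size_t i = 0; i < f.nParams(); ++i) {
    if (f.isActive(i))
      indexMap.push_back(i);
  }
  return indexMap;
}
// For ToyFunction{{true, false, true}} this returns {0, 2}: the cost function
// works with 2 active parameters while the function declares 3.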
Example #6
//----------------------------------------------------------------------------------------------
/// Examine the chi squared as a function of fitting parameters and estimate
/// errors for each parameter.
void CalculateChiSquared::estimateErrors() {
  // Number of fitting parameters
  auto nParams = m_function->nParams();
  // Create an output table for displaying slices of the chi squared and
  // the probability density function
  auto pdfTable = API::WorkspaceFactory::Instance().createTable();

  std::string baseName = getProperty("Output");
  if (baseName.empty()) {
    baseName = "CalculateChiSquared";
  }
  declareProperty(new API::WorkspaceProperty<API::ITableWorkspace>(
                      "PDFs", "", Kernel::Direction::Output),
                  "The name of the TableWorkspace in which to store the "
                  "pdfs of fit parameters");
  setPropertyValue("PDFs", baseName + "_pdf");
  setProperty("PDFs", pdfTable);

  // Create an output table for displaying the parameter errors.
  auto errorsTable = API::WorkspaceFactory::Instance().createTable();
  auto nameColumn = errorsTable->addColumn("str", "Parameter");
  auto valueColumn = errorsTable->addColumn("double", "Value");
  auto minValueColumn = errorsTable->addColumn("double", "Value at Min");
  auto leftErrColumn = errorsTable->addColumn("double", "Left Error");
  auto rightErrColumn = errorsTable->addColumn("double", "Right Error");
  auto quadraticErrColumn = errorsTable->addColumn("double", "Quadratic Error");
  auto chiMinColumn = errorsTable->addColumn("double", "Chi2 Min");
  errorsTable->setRowCount(nParams);
  declareProperty(new API::WorkspaceProperty<API::ITableWorkspace>(
                      "Errors", "", Kernel::Direction::Output),
                  "The name of the TableWorkspace in which to store the "
                  "values and errors of fit parameters");
  setPropertyValue("Errors", baseName + "_errors");
  setProperty("Errors", errorsTable);

  // Calculate initial values
  double chiSquared = 0.0;
  double chiSquaredWeighted = 0.0;
  double dof = 0;
  API::FunctionDomain_sptr domain;
  API::FunctionValues_sptr values;
  m_domainCreator->createDomain(domain, values);
  calcChiSquared(*m_function, nParams, *domain, *values, chiSquared,
                 chiSquaredWeighted, dof);
  // Value of chi squared for current parameters in m_function
  double chi0 = chiSquared;
  // Fit data variance
  double sigma2 = chiSquared / dof;
  bool useWeighted = getProperty("Weighted");

  if (useWeighted) {
    chi0 = chiSquaredWeighted;
    sigma2 = 0.0;
  }

  if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
    g_log.debug() << "chi0=" << chi0 << std::endl;
    g_log.debug() << "sigma2=" << sigma2 << std::endl;
    g_log.debug() << "dof=" << dof << std::endl;
  }

  // Parameter bounds that define a volume in the parameter
  // space within which the chi squared is being examined.
  GSLVector lBounds(nParams);
  GSLVector rBounds(nParams);

  // Number of points in lines for plotting
  size_t n = 100;
  pdfTable->setRowCount(n);
  const double fac = 1e-4;

  // Loop over each parameter
  for (size_t ip = 0; ip < nParams; ++ip) {

    // Add columns for the parameter to the pdf table.
    auto parName = m_function->parameterName(ip);
    nameColumn->read(ip, parName);
    // Parameter values
    auto col1 = pdfTable->addColumn("double", parName);
    col1->setPlotType(1);
    // Chi squared values
    auto col2 = pdfTable->addColumn("double", parName + "_chi2");
    col2->setPlotType(2);
    // PDF values
    auto col3 = pdfTable->addColumn("double", parName + "_pdf");
    col3->setPlotType(2);

    double par0 = m_function->getParameter(ip);
    double shift = fabs(par0 * fac);
    if (shift == 0.0) {
      shift = fac;
    }

    // Make a slice along this parameter
    GSLVector dir(nParams);
    dir.zero();
    dir[ip] = 1.0;
    ChiSlice slice(*m_function, dir, *domain, *values, chi0, sigma2);

    // Find the bounds within which the PDF is significantly above zero.
    // The bounds are defined relative to par0:
    //   par0 + lBound is the lowest value of the parameter (lBound <= 0)
    //   par0 + rBound is the highest value of the parameter (rBound >= 0)
    double lBound = slice.findBound(-shift);
    double rBound = slice.findBound(shift);
    lBounds[ip] = lBound;
    rBounds[ip] = rBound;

    // Approximate the slice with a polynomial.
    // P is a vector of values of the polynomial at special points.
    // A is a vector of Chebyshev expansion coefficients.
    // The polynomial is defined on interval [lBound, rBound]
    // The value of the polynomial at 0 == chi squared at par0
    std::vector<double> P, A;
    bool ok = true;
    auto base = slice.makeApprox(lBound, rBound, P, A, ok);
    if (!ok) {
      g_log.warning() << "Approximation failed for parameter " << ip << std::endl;
    }
    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "Parameter " << ip << std::endl;
      g_log.debug() << "Slice approximated by polynomial of order "
                    << base->size() - 1;
      g_log.debug() << " between " << lBound << " and " << rBound << std::endl;
    }

    // Write n slice points into the output table.
    double dp = (rBound - lBound) / static_cast<double>(n);
    for (size_t i = 0; i < n; ++i) {
      double par = lBound + dp * static_cast<double>(i);
      double chi = base->eval(par, P);
      col1->fromDouble(i, par0 + par);
      col2->fromDouble(i, chi);
    }

    // Check if par0 is a minimum point of the chi squared
    std::vector<double> AD;
    // Calculate the derivative polynomial.
    // AD are the Chebyshev expansion of the derivative.
    base->derivative(A, AD);
    // Find the roots of the derivative polynomial
    std::vector<double> minima = base->roots(AD);
    if (minima.empty()) {
      minima.push_back(par0);
    }

    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "Minima: ";
    }

    // If only one extremum is found, assume (without checking) that it is a
    // minimum.
    // If there is more than one, pick the one with the smallest chi^2.
    double chiMin = std::numeric_limits<double>::max();
    double parMin = par0;
    for (size_t i = 0; i < minima.size(); ++i) {
      double value = base->eval(minima[i], P);
      if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
        g_log.debug() << minima[i] << " (" << value << ") ";
      }
      if (value < chiMin) {
        chiMin = value;
        parMin = minima[i];
      }
    }
    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << std::endl;
      g_log.debug() << "Smallest minimum at " << parMin << " is " << chiMin
                    << std::endl;
    }

    // Points of intersection with the line chi^2 = 1/2 give an estimate of
    // the standard deviation of this parameter if it is uncorrelated with the
    // others.
    A[0] -= 0.5; // Now A are the coefficients of the original polynomial
                 // shifted down by 1/2.
    std::vector<double> roots = base->roots(A);
    std::sort(roots.begin(), roots.end());

    if (roots.empty()) {
      // Something went wrong; use the whole interval.
      roots.resize(2);
      roots[0] = lBound;
      roots[1] = rBound;
    } else if (roots.size() == 1) {
      // Only one root found; use a bound for the other root.
      if (roots.front() < 0) {
        roots.push_back(rBound);
      } else {
        roots.insert(roots.begin(), lBound);
      }
    } else if (roots.size() > 2) {
      // More than 2 roots; use the smallest and the biggest
      auto smallest = roots.front();
      auto biggest = roots.back();
      roots.resize(2);
      roots[0] = smallest;
      roots[1] = biggest;
    }

    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "Roots: ";
      for (size_t i = 0; i < roots.size(); ++i) {
        g_log.debug() << roots[i] << ' ';
      }
      g_log.debug() << std::endl;
    }

    // Output parameter info to the table.
    valueColumn->fromDouble(ip, par0);
    minValueColumn->fromDouble(ip, par0 + parMin);
    leftErrColumn->fromDouble(ip, roots[0] - parMin);
    rightErrColumn->fromDouble(ip, roots[1] - parMin);
    chiMinColumn->fromDouble(ip, chiMin);

    // Output the PDF
    for (size_t i = 0; i < n; ++i) {
      double chi = col2->toDouble(i);
      col3->fromDouble(i, exp(-chi + chiMin));
    }

    // make sure function parameters don't change.
    m_function->setParameter(ip, par0);
  }

  // Improve estimates for standard deviations.
  // If the parameters are correlated, the deviations found above
  // most likely underestimate the true values.
  unfixParameters();
  GSLJacobian J(m_function, values->size());
  m_function->functionDeriv(*domain, J);
  refixParameters();
  // Calculate the hessian at the current point.
  GSLMatrix H;
  if (useWeighted) {
    H.resize(nParams, nParams);
    for (size_t i = 0; i < nParams; ++i) {
      for (size_t j = i; j < nParams; ++j) {
        double h = 0.0;
        for (size_t k = 0; k < values->size(); ++k) {
          double w = values->getFitWeight(k);
          h += J.get(k, i) * J.get(k, j) * w * w;
        }
        H.set(i, j, h);
        if (i != j) {
          H.set(j, i, h);
        }
      }
    }
  } else {
    H = Tr(J.matrix()) * J.matrix();
  }
  // Square roots of the diagonals of the covariance matrix give
  // the standard deviations in the quadratic approximation of the chi^2.
  GSLMatrix V(H);
  if (!useWeighted) {
    V *= 1. / sigma2;
  }
  V.invert();
  // In a non-quadratic asymmetric case the following procedure can give a
  // better result:
  // Find the direction in which the chi^2 changes slowest and the positive and
  // negative deviations in that direction. The change in a parameter at those
  // points can be a better estimate for the standard deviation.
  GSLVector v(nParams);
  GSLMatrix Q(nParams, nParams);
  // One of the eigenvectors of the hessian is the direction of the slowest
  // change.
  H.eigenSystem(v, Q);

  // Loop over the eigenvectors
  for (size_t i = 0; i < nParams; ++i) {
    auto dir = Q.copyColumn(i);
    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "Direction " << i << std::endl;
      g_log.debug() << dir << std::endl;
    }
    // Make a slice in that direction
    ChiSlice slice(*m_function, dir, *domain, *values, chi0, sigma2);
    double rBound0 = dir.dot(rBounds);
    double lBound0 = dir.dot(lBounds);
    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "lBound " << lBound0 << std::endl;
      g_log.debug() << "rBound " << rBound0 << std::endl;
    }
    double lBound = slice.findBound(lBound0);
    double rBound = slice.findBound(rBound0);
    std::vector<double> P, A;
    // Use a polynomial approximation
    bool ok = true;
    auto base = slice.makeApprox(lBound, rBound, P, A, ok);
    if (!ok) {
      g_log.warning() << "Approximation failed in direction " << i << std::endl;
    }
    // Find the deviation points where the chi^2 = 1/2
    A[0] -= 0.5;
    std::vector<double> roots = base->roots(A);
    std::sort(roots.begin(), roots.end());
    // Sort out the roots
    auto nRoots = roots.size();
    if (nRoots == 0) {
      roots.resize(2, 0.0);
    } else if (nRoots == 1) {
      if (roots.front() > 0.0) {
        roots.insert(roots.begin(), 0.0);
      } else {
        roots.push_back(0.0);
      }
    } else if (nRoots > 2) {
      roots[1] = roots.back();
      roots.resize(2);
    }
    if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
      g_log.debug() << "Roots " << roots[0] << " (" << slice(roots[0]) << ") " << roots[1] << " (" << slice(roots[1]) << ") " << std::endl;
    }
    // Loop over the parameters and see if the deviations along
    // this direction are greater than any previously found values.
    for (size_t ip = 0; ip < nParams; ++ip) {
      auto lError = roots.front() * dir[ip];
      auto rError = roots.back() * dir[ip];
      if (lError > rError) {
        std::swap(lError, rError);
      }
      if (lError < leftErrColumn->toDouble(ip)) {
        if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
          g_log.debug() << "  left for  " << ip << ' ' << lError << ' ' << leftErrColumn->toDouble(ip) << std::endl;
        }
        leftErrColumn->fromDouble(ip, lError);
      }
      if (rError > rightErrColumn->toDouble(ip)) {
        if (g_log.is(Kernel::Logger::Priority::PRIO_DEBUG)) {
          g_log.debug() << "  right for " << ip << ' ' << rError << ' ' << rightErrColumn->toDouble(ip) << std::endl;
        }
        rightErrColumn->fromDouble(ip, rError);
      }
    }
    // Output the quadratic estimate for comparison.
    quadraticErrColumn->fromDouble(i, sqrt(V.get(i, i)));
  }
}
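The per-parameter error estimate above boils down to finding where the chi-squared slice, shifted down by 1/2, crosses zero on either side of its minimum. A minimal self-contained illustration of that idea on a quadratic slice, using plain bisection instead of the Chebyshev root finding used in the algorithm (bisect and chi2 are hypothetical names, not Mantid code):

#include <cstdio>
#include <functional>

// Locate a sign change of f on [a, b] by bisection; f(a) and f(b) must have
// opposite signs. Stands in for the polynomial root finding used above.
double bisect(const std::function<double(double)> &f, double a, double b) {
  for (int it = 0; it < 60; ++it) {
    const double m = 0.5 * (a + b);
    if ((f(a) < 0.0) == (f(m) < 0.0))
      a = m;
    else
      b = m;
  }
  return 0.5 * (a + b);
}

int main() {
  // A quadratic chi-squared slice with its minimum at p0 and width sigma.
  const double p0 = 1.3, sigma = 0.2;
  auto chi2 = [&](double p) { const double t = (p - p0) / sigma; return 0.5 * t * t; };

  // Roots of chi2(p) - 1/2 mark the left and right one-sigma points.
  auto shifted = [&](double p) { return chi2(p) - 0.5; };
  const double left = bisect(shifted, p0 - 10.0 * sigma, p0);
  const double right = bisect(shifted, p0, p0 + 10.0 * sigma);
  std::printf("left error %.4f, right error %.4f\n", left - p0, right - p0);
  // Prints approximately -0.2000 and 0.2000, i.e. -/+ sigma for this slice.
  return 0;
}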
Example #7
/**
 * Update the cost function, derivatives and hessian by adding values calculated
 * on a domain.
 * @param function :: Function to use to calculate the value and the derivatives
 * @param domain :: The domain.
 * @param values :: The fit function values
 * @param evalFunction :: Flag to evaluate the function
 * @param evalDeriv :: Flag to evaluate the derivatives
 * @param evalHessian :: Flag to evaluate the Hessian
 */
void CostFuncLeastSquares::addValDerivHessian(
  API::IFunction_sptr function,
  API::FunctionDomain_sptr domain,
  API::FunctionValues_sptr values,
  bool evalFunction , bool evalDeriv, bool evalHessian) const
{
  UNUSED_ARG(evalDeriv);
  size_t np = function->nParams();  // number of parameters 
  size_t ny = domain->size(); // number of data points
  Jacobian jacobian(ny,np);
  if (evalFunction)
  {
    function->function(*domain,*values);
  }
  function->functionDeriv(*domain,jacobian);

  size_t iActiveP = 0;
  double fVal = 0.0;
  if (debug)
  {
    std::cerr << "Jacobian:\n";
    for(size_t i = 0; i < ny; ++i)
    {
      for(size_t ip = 0; ip < np; ++ip)
      {
        if ( !m_function->isActive(ip) ) continue;
        std::cerr << jacobian.get(i,ip) << ' ';
      }
      std::cerr << std::endl;
    }
  }
  for(size_t ip = 0; ip < np; ++ip)
  {
    if ( !function->isActive(ip) ) continue;
    double d = 0.0;
    for(size_t i = 0; i < ny; ++i)
    {
      double calc = values->getCalculated(i);
      double obs = values->getFitData(i);
      double w = values->getFitWeight(i);
      double y = ( calc - obs ) * w;
      d += y * jacobian.get(i,ip) * w;
      if (iActiveP == 0 && evalFunction)
      {
        fVal += y * y;
      }
    }
    PARALLEL_CRITICAL(der_set)
    {
      double der = m_der.get(iActiveP);
      m_der.set(iActiveP, der + d);
    }
    //std::cerr << "der " << ip << ' ' << der[iActiveP] << std::endl;
    ++iActiveP;
  }

  if (evalFunction)
  {
    PARALLEL_ATOMIC
    m_value += 0.5 * fVal;
  }

  if (!evalHessian) return;

  //size_t na = m_der.size(); // number of active parameters

  size_t i1 = 0; // active parameter index
  for(size_t i = 0; i < np; ++i) // over parameters
  {
    if ( !function->isActive(i) ) continue;
    size_t i2 = 0; // active parameter index
    for(size_t j = 0; j <= i; ++j) // over ~ half of parameters
    {
      if ( !function->isActive(j) ) continue;
      double d = 0.0;
      for(size_t k = 0; k < ny; ++k) // over fitting data
      {
        double w = values->getFitWeight(k);
        d += jacobian.get(k,i) * jacobian.get(k,j) * w * w;
      }
      PARALLEL_CRITICAL(hessian_set)
      {
        double h = m_hessian.get(i1,i2);
        m_hessian.set(i1,i2, h + d);
        //std::cerr << "hess " << i1 << ' ' << i2 << std::endl;
        if (i1 != i2)
        {
          m_hessian.set(i2,i1,h + d);
        }
      }
      ++i2;
    }
    ++i1;
  }
}
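For reference, the quantities accumulated above are the standard weighted least-squares terms: the value 0.5 * sum_i (w_i * (calc_i - obs_i))^2, the gradient J^T W^2 (calc - obs) and the Gauss-Newton Hessian J^T W^2 J. A compact self-contained sketch of the same accumulation over flat std::vector data; the LeastSquaresTerms struct is illustrative, not a Mantid type (Mantid stores these in m_der and m_hessian):

#include <vector>

struct LeastSquaresTerms {
  double value = 0.0;
  std::vector<double> gradient;             // size np
  std::vector<std::vector<double>> hessian; // np x np
};

LeastSquaresTerms accumulate(const std::vector<double> &calc,
                             const std::vector<double> &obs,
                             const std::vector<double> &w,
                             const std::vector<std::vector<double>> &J) { // J[i][p]
  const size_t ny = calc.size();
  const size_t np = J.empty() ? 0 : J[0].size();
  LeastSquaresTerms t;
  t.gradient.assign(np, 0.0);
  t.hessian.assign(np, std::vector<double>(np, 0.0));
  for (size_t i = 0; i < ny; ++i) {
    const double r = (calc[i] - obs[i]) * w[i]; // weighted residual, "y" above
    t.value += 0.5 * r * r;
    for (size_t p = 0; p < np; ++p) {
      t.gradient[p] += r * J[i][p] * w[i];      // analogue of m_der accumulation
      for (size_t q = 0; q <= p; ++q) {
        const double h = J[i][p] * J[i][q] * w[i] * w[i];
        t.hessian[p][q] += h;                   // analogue of m_hessian accumulation
        if (p != q)
          t.hessian[q][p] += h;
      }
    }
  }
  return t;
}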
Example #8
/**
 * Update the cost function, derivatives and hessian by adding values calculated
 * on a domain.
 * @param function :: Function to use to calculate the value and the derivatives
 * @param domain :: The domain.
 * @param values :: The fit function values
 * @param evalDeriv :: Flag to evaluate the derivatives
 * @param evalHessian :: Flag to evaluate the Hessian
 */
void CostFuncLeastSquares::addValDerivHessian(API::IFunction_sptr function,
                                              API::FunctionDomain_sptr domain,
                                              API::FunctionValues_sptr values,
                                              bool evalDeriv,
                                              bool evalHessian) const {
  UNUSED_ARG(evalDeriv);
  function->function(*domain, *values);
  size_t np = function->nParams(); // number of parameters
  size_t ny = values->size();      // number of data points
  Jacobian jacobian(ny, np);
  function->functionDeriv(*domain, jacobian);

  size_t iActiveP = 0;
  double fVal = 0.0;
  std::vector<double> weights = getFitWeights(values);

  for (size_t ip = 0; ip < np; ++ip) {
    if (!function->isActive(ip))
      continue;
    double d = 0.0;
    for (size_t i = 0; i < ny; ++i) {
      double calc = values->getCalculated(i);
      double obs = values->getFitData(i);
      double w = weights[i];
      double y = (calc - obs) * w;
      d += y * jacobian.get(i, ip) * w;
      if (iActiveP == 0) {
        fVal += y * y;
      }
    }
    PARALLEL_CRITICAL(der_set) {
      double der = m_der.get(iActiveP);
      m_der.set(iActiveP, der + d);
    }
    ++iActiveP;
  }

  PARALLEL_ATOMIC
  m_value += 0.5 * fVal;

  if (!evalHessian)
    return;

  size_t i1 = 0;                  // active parameter index
  for (size_t i = 0; i < np; ++i) // over parameters
  {
    if (!function->isActive(i))
      continue;
    size_t i2 = 0;                  // active parameter index
    for (size_t j = 0; j <= i; ++j) // over ~ half of parameters
    {
      if (!function->isActive(j))
        continue;
      double d = 0.0;
      for (size_t k = 0; k < ny; ++k) // over fitting data
      {
        double w = weights[k];
        d += jacobian.get(k, i) * jacobian.get(k, j) * w * w;
      }
      PARALLEL_CRITICAL(hessian_set) {
        double h = m_hessian.get(i1, i2);
        m_hessian.set(i1, i2, h + d);
        if (i1 != i2) {
          m_hessian.set(i2, i1, h + d);
        }
      }
      ++i2;
    }
    ++i1;
  }
}
/// Return unit weights for all data points.
std::vector<double> CostFuncUnweightedLeastSquares::getFitWeights(
    API::FunctionValues_sptr values) const {
  return std::vector<double>(values->size(), 1.0);
}
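With this override every weight equals 1.0, so the accumulation in addValDerivHessian above reduces to an ordinary (unweighted) sum of squared residuals; only the getFitWeights hook differs between the weighted and unweighted cost functions.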