/// /// \brief minimize starting from the initial guess x0 /// tstate operator()(const tproblem& problem, const tvector& x0) const { assert(problem.size() == static_cast<tsize>(x0.size())); // current state tstate cstate(problem, x0); // running-weighted-averaged-per-dimension-squared gradient average_vector_t<tscalar, tvector> gavg(x0.size()); for (tsize e = 0, k = 1; e < base_t::m_epochs; e ++) { for (tsize i = 0; i < base_t::m_epoch_size; i ++, k ++) { // learning rate const tscalar alpha = base_t::m_alpha0; // descent direction gavg.update(cstate.g.array().square(), base_t::weight(k)); cstate.d = -cstate.g.array() / (base_t::m_epsilon + gavg.value().array()).sqrt(); // update solution cstate.update(problem, alpha); } base_t::ulog(cstate); } return cstate; }
// Return the value for the requested item-model role, or an invalid
// QVariant when the role is not handled by this item.
QVariant ActivityItem::data(int role) const
{
    if (role == CodeRole)       return code();
    if (role == NameRole)       return name();
    if (role == IconRole)       return icon();
    if (role == CStateRole)     return cstate();
    if (role == BackgroundRole) return backgrounds();
    if (role == OrderRole)      return order();
    return QVariant();
}