void expectation_prop::train(size_t numTopics) { this->numTopics = numTopics; setup_parameters(); double likelihood; double old_likelihood = 0.0; int iteration = 0; bool converged = false; while(!converged && iteration < MAX_ITERATION){ iteration++; likelihood = 0.0; for(int d=0; d<numDocs; ++d){ likelihood += doc_e_step(d); } m_step(); double conv = fabs((old_likelihood - likelihood)/old_likelihood); old_likelihood = likelihood; if(conv < CONV_THRESHHOLD){ converged = true; } first = false; std::cout << "Iteration " << iteration << ": with likelihood: " << likelihood <<std::endl; } }
CDataContainer CIntTypeInfo::serialize() const
{
    // Build a container holding only the bounds that were actually
    // configured; undefined fields are omitted entirely.
    CDataContainer data;
    if (m_min.isDefined())
        data.set("min", m_min());
    if (m_max.isDefined())
        data.set("max", m_max());
    if (m_step.isDefined())
        data.set("step", m_step());
    return data;
}
void CPDNRigid<Scalar, Dim>::compute()
{
    // Non-rigid CPD registration loop: alternate E/M steps until the
    // relative energy change or sigma^2 falls below its tolerance, or the
    // iteration budget runs out.
    size_t iteration = 0;
    Scalar rel_change = 10 + this->e_tol_;  // force at least one pass
    Scalar current_energy = 0;

    this->normalize();
    initialization();

    // (Render-thread visualization hooks are disabled in this build.)

    while (iteration < this->iter_num_ &&
           rel_change > this->e_tol_ &&
           paras_.sigma2_ > 10 * this->v_tol_)
    {
        e_step();

        const Scalar previous_energy = current_energy;
        current_energy = energy();
        // Regularization term: lambda/2 * tr(W^T G W).
        current_energy += paras_.lambda_ / 2 * (paras_.W_.transpose() * G_ * paras_.W_).trace();
        rel_change = fabs((current_energy - previous_energy) / current_energy);

        m_step();
        ++iteration;
    }

    correspondences();
    this->updateModel();
    this->denormalize();
    this->rewriteOriginalSource();
}
// Accumulate one document's sufficient statistics for the online M step and
// trigger a parameter update once a full minibatch has been seen.
void FastOnlineSupervisedMStep<Scalar>::doc_m_step(
    const std::shared_ptr<corpus::Document> doc,
    const std::shared_ptr<parameters::Parameters> v_parameters,
    std::shared_ptr<parameters::Parameters> m_parameters
) {
    // Data from document doc: word counts and (for the supervised part) the
    // document's class label.
    const Eigen::VectorXi & X = doc->get_words();
    int y = std::static_pointer_cast<corpus::ClassificationDocument>(doc)->get_class();

    // Variational parameters for this document (phi: topic-word
    // responsibilities, gamma: Dirichlet posterior over topics).
    const MatrixX & phi = std::static_pointer_cast<parameters::VariationalParameters<Scalar> >(v_parameters)->phi;
    const VectorX &gamma = std::static_pointer_cast<parameters::VariationalParameters<Scalar> >(v_parameters)->gamma;

    // Supervised model parameters (alpha: Dirichlet prior).
    const VectorX &alpha = std::static_pointer_cast<parameters::SupervisedModelParameters<Scalar> >(m_parameters)->alpha;

    // Initialize our variables lazily on the first document, once the
    // topic/vocabulary dimensions are known from phi.
    if (b_.rows() == 0) {
        b_ = MatrixX::Zero(phi.rows(), phi.cols());
        expected_z_bar_ = MatrixX::Zero(phi.rows(), minibatch_size_);
        y_ = Eigen::VectorXi::Zero(minibatch_size_);
        eta_velocity_ = MatrixX::Zero(phi.rows(), num_classes_);
        eta_gradient_ = MatrixX::Zero(phi.rows(), num_classes_);
    }

    // Unsupervised sufficient statistics: weight each word's topic
    // responsibilities by that word's count and accumulate into b_.
    b_.array() += phi.array().rowwise() * X.cast<Scalar>().transpose().array();

    // Supervised sufficient statistics: expected topic proportions for this
    // document, taken as (gamma - alpha) normalized to sum to 1, stored in
    // this document's minibatch column together with its label.
    expected_z_bar_.col(docs_seen_so_far_) = gamma - alpha;
    expected_z_bar_.col(docs_seen_so_far_).array() /= expected_z_bar_.col(docs_seen_so_far_).sum();
    y_(docs_seen_so_far_) = y;

    // mark another document as seen
    docs_seen_so_far_++;

    // Check if we need to update the parameters: once the minibatch is full,
    // run the M step over the accumulated statistics.
    if (docs_seen_so_far_ >= minibatch_size_)
        m_step(m_parameters);
}
void CPDNRigid<T, D>::run()
{
    // Non-rigid CPD registration: alternate E/M steps until the relative
    // energy change or sigma^2 drops below its tolerance, or the iteration
    // budget is exhausted. Optionally streams intermediate results to the
    // render thread when _vision is set.
    size_t iter_num = 0;
    T e_tol = 10 + _e_tol;  // force at least one pass
    T e = 0;

    normalize();
    initialization();

    if (_vision)
    {
        RenderThread<T, D>::instance()->updateModel(_model);
        RenderThread<T, D>::instance()->updateData(_data);
        RenderThread<T, D>::instance()->startThread();
    }

    while (iter_num < _iter_num && e_tol > _e_tol && _paras._sigma2 > 10 * _v_tol)
    {
        e_step();

        T old_e = e;
        e = energy();
        // Regularization term: lambda/2 * tr(W^T G W).
        e += _paras._lambda / 2 * (_paras._W.transpose() * _G * _paras._W).trace();
        // BUG FIX: the original used unqualified abs(), which can resolve to
        // the C integer ::abs overload and truncate the relative change
        // toward 0, terminating the loop prematurely. Use the floating-point
        // fabs() (as the sibling compute() overload already does).
        e_tol = fabs((e - old_e) / e);

        m_step();
        iter_num++;
    }

    correspondences();
    updateModel();
    denormalize();
    RenderThread<T, D>::instance()->cancel();
}
void Kmeans::cluster(const MatrixXdRowMajor& data_points, int k) { init_centroids(data_points,k); int i=1; tiempo_promedio_iteracion = 0.0; tiempo_promedio_e_step = 0.0; tiempo_promedio_m_step = 0.0; double tiempo_parcial; do { std::cout << "Iteration: " << i++; changed_centroids = 0; error = 0.0; utils.tic("Iteration"); utils.tic("E"); #ifdef MIC mic_e_step(data_points); #else e_step(data_points); #endif tiempo_parcial = utils.toc("E"); tiempo_promedio_e_step += tiempo_parcial; std::cout << "\tE: " << tiempo_parcial << "(" << (tiempo_promedio_e_step/(i-1)) << ")"; utils.tic("M"); m_step(data_points); tiempo_parcial = utils.toc("M"); tiempo_promedio_m_step += tiempo_parcial; std::cout << "\tM: " << tiempo_parcial << "(" << (tiempo_promedio_m_step/(i-1)) << ")"; tiempo_parcial = utils.toc("Iteration"); tiempo_promedio_iteracion += tiempo_parcial; std::cout << "\tI: " << tiempo_parcial << "(" << (tiempo_promedio_iteracion/(i-1)) << ")"; std::cout<<"\tchanged: "<<changed_centroids<<"\terror: "<<error<<std::endl; } while (changed_centroids > 0); std::cout << "Total Execution time: " << tiempo_promedio_iteracion << std::endl; std::cout << "Total E_STEP time: " << tiempo_promedio_e_step << std::endl; std::cout << "Total M_STEP time: " << tiempo_promedio_m_step << std::endl; }
// Thin wrapper: performing a step simply delegates to m_step().
void step()
{
    m_step();
}