void parameters::Likelihood(const VectorXd & eff){
  // Log-likelihood of the mixture: each row of m_proba holds the (unnormalised)
  // component probabilities of one observation pattern, eff the pattern counts.
  VectorXd loc = m_proba.rowwise().sum().array().log();
  m_loglikelihood = eff.dot(loc);
  // BIC criterion (up to the usual factor of -2): logL - 0.5 * #params * log(n)
  m_bic = m_loglikelihood - 0.5*m_nbparam*log(eff.sum());
}
double unimcd_in( const VectorXd& m_resd, const int& h ){
  const int n1 = m_resd.size(), len = n1 - h + 1;
  double initmean = 0.0, initcov = 0.0;
  int minone;
  if(h == n1){
    initmean = m_resd.sum()/(double)h;
    initcov = (m_resd.array() - initmean).abs2().sum()/(double)(h-1);
    return sqrt(initcov);
  }
  VectorXd y = m_resd;
  VectorXd ay(len), ay2(len), sq(len), y2(n1);
  std::sort(y.data(), y.data() + y.size());
  // Rolling sums over every contiguous subset of size h of the sorted sample
  ay(0) = y.head(h).sum();
  for(int samp = 1; samp < len; samp++)
    ay(samp) = ay(samp-1) - y(samp-1) + y(samp+h-1);
  ay2 = ay.array().square()/(double)h;
  y2 = y.array().square();
  // Rolling sums of squares, mean-corrected
  sq(0) = y2.head(h).sum() - ay2(0);
  for(int samp = 1; samp < len; samp++)
    sq(samp) = sq(samp-1) - y2(samp-1) + y2(samp+h-1) - ay2(samp) + ay2(samp-1);
  // The h-subset with the smallest variance gives the MCD estimates
  initcov = sq.minCoeff(&minone);
  initcov /= (double)(h-1);
  initmean = ay(minone)/(double)h; // location estimate of the best subset (not returned)
  // Return the scale, as the h == n1 branch above does; the original returned
  // initmean here, which is inconsistent with that branch.
  return sqrt(initcov);
}
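// Usage sketch for unimcd_in (not from the original source): assumes the
// function above is compiled in the same translation unit; the residual
// values and the subset size h are illustrative.
#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <iostream>
using Eigen::VectorXd;

double unimcd_in(const VectorXd& m_resd, const int& h); // defined above

int main(){
  // Residuals with two gross outliers; a robust scale should stay close to
  // the spread of the clean points.
  VectorXd resd(8);
  resd << -1.2, -0.7, -0.1, 0.0, 0.3, 0.8, 25.0, 30.0;
  const int h = 5; // majority subset, h > n/2
  std::cout << "robust scale: " << unimcd_in(resd, h) << "\n";
}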
SpMat make_C(const SpMat& chol_K_inv, const VectorXd& h2s, const SpMat& ZtZ){
  // C = Z'Z / (1 - sum(h2)) + K^{-1}, with K^{-1} recovered from its
  // Cholesky factor: K^{-1} = L' L where L = chol_K_inv.
  SpMat Ki = chol_K_inv.transpose() * chol_K_inv;
  SpMat C = ZtZ;
  C /= (1.0 - h2s.sum());
  C += Ki;
  return C;
}
void LoadMatrices(MatrixXd &A, VectorXd &W, VectorXd &p, string weight_type)
{
    A = MatrixXd::Zero(n_n, n_n);
    MatrixXd Apn = MatrixXd::Zero(n_p, n_n);
    MatrixXd abs_Apn = MatrixXd::Zero(n_p, n_n);
    VectorXd k = VectorXd::Zero(n_n);
    p = VectorXd::Zero(n_n);
    W = VectorXd::Zero(n_p);

    // Topological incidence matrix
    for (unsigned int j = 0; j < n_p; j++)
    {
        int idx_n1 = wds->agelemek.at(pipe_idx.at(j))->Get_Cspe_Index();
        int idx_n2 = wds->agelemek.at(pipe_idx.at(j))->Get_Cspv_Index();
        Apn(j, idx_n1)++;
        Apn(j, idx_n2)--;
        abs_Apn(j, idx_n1)++;
        abs_Apn(j, idx_n2)++;
    }

    // Adjacency matrix: zero the diagonal, binarise the off-diagonal entries
    A = Apn.transpose() * Apn;
    for (unsigned int i = 0; i < n_n; i++)
    {
        A(i, i) = 0;
        for (unsigned int j = 0; j < n_n; j++)
        {
            if (A(i, j) != 0)
            {
                A(i, j) = 1.;
                // k(i)++;
            }
        }
    }

    if (weight_type == "topology")
    {
        for (unsigned int i = 0; i < W.size(); i++)
            W(i) = 1;
    }

    if (weight_type == "dp")
    {
        double dp, dp_max = -1., weight_min = 0.0001;
        for (unsigned int i = 0; i < W.size(); i++)
        {
            // dp = wds->agelemek.at(pipe_idx.at(i))->Get_dprop("mass_flow_rate");
            dp = wds->agelemek.at(pipe_idx.at(i))->Get_dprop("headloss");
            // dp = wds->agelemek.at(pipe_idx.at(i))->Get_dprop("length");
            W(i) = std::abs(dp); // std::abs: the plain abs(double) may truncate to int
            if (W(i) > dp_max)
                dp_max = W(i);
            //cout<<endl<<wds->agelemek.at(pipe_idx.at(i))->Get_nev()<<" dp="<<dp;
        }
        // Normalise the weights into [weight_min, 1]
        for (unsigned int i = 0; i < W.size(); i++)
        {
            W(i) /= dp_max;
            if (W(i) < weight_min)
                W(i) = weight_min;
            // cout<<endl<<wds->agelemek.at(pipe_idx.at(i))->Get_nev()<<" weight="<<W(i);
        }
    }

    // Final computations
    p = abs_Apn.transpose() * W;
    sumW = W.sum();
}
double entropy(const VectorXd &values)
{
    // Normalise to a probability vector
    VectorXd fy = values / values.sum();
    double invlog = 1 / std::log(2.0);
    double epsilon = std::numeric_limits<double>::epsilon();
    // h = -sum(fy * log2(fy + epsilon)); epsilon guards against log(0)
    double h = (fy.array() * (fy.array() + epsilon).log() * invlog).sum();
    return -h;
}
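// Usage sketch for entropy (not from the original source): assumes the
// function above is in scope; the count vectors are illustrative.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>
#include <limits>
using Eigen::VectorXd;

double entropy(const VectorXd &values); // defined above

int main(){
  VectorXd uniform = VectorXd::Constant(8, 1.0); // 8 equally likely outcomes
  VectorXd peaked(3);
  peaked << 98, 1, 1;
  std::cout << entropy(uniform) << "\n"; // ~3 bits = log2(8)
  std::cout << entropy(peaked)  << "\n"; // well below log2(3)
}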
VectorXd Zscores2Post(VectorXd& Zs){
  // Convert z-scores to posterior probabilities under equal priors:
  // post_i = exp(Zsq_i/2) / sum_j exp(Zsq_j/2), computed as
  // 1 / sum_j exp((Zsq_j - Zsq_i)/2) so that exp() cannot overflow.
  VectorXd post(Zs.size());
  VectorXd Zsq = Zs.array().square();
  for(int i = 0; i < Zsq.size(); i++){
    VectorXd Ztemp = (Zsq.array() - Zsq[i])/2;
    VectorXd Zexp = Ztemp.array().exp();
    post[i] = 1/Zexp.sum();
  }
  return post;
}
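// Usage sketch for Zscores2Post (not from the original source): assumes the
// function above is in scope; the z-score values are illustrative.
#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

VectorXd Zscores2Post(VectorXd& Zs); // defined above

int main(){
  VectorXd z(2);
  z << 2.0, 2.0;  // identical z-scores split the mass evenly
  std::cout << Zscores2Post(z).transpose() << "\n"; // 0.5 0.5
  VectorXd z2(2);
  z2 << 3.0, 1.0; // the larger |z| takes most of the mass
  std::cout << Zscores2Post(z2).transpose() << "\n"; // ~0.982 0.018
}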
SpMat make_Sigma(const std::vector<SpMat>& ZKZts, const VectorXd& h2s, double tol){
  // Sigma = sum_i h2_i * Z_i K_i Z_i' + (1 - sum(h2)) * I,
  // assembled densely and then pruned back to a sparse matrix.
  int n = ZKZts[0].rows();
  int h = h2s.size();
  MatrixXd R(n,n);
  R.setZero();
  for(int i = 0; i < h; i++){
    R += h2s[i] * ZKZts[i];
  }
  R.diagonal().array() += (1.0 - h2s.sum());
  return R.sparseView(0, tol);
}
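// Sanity-check sketch for make_Sigma (not from the original source): assumes
// the function above is in scope and that SpMat is the library's
// SparseMatrix<double> typedef; values are illustrative.
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <vector>
typedef Eigen::SparseMatrix<double> SpMat;
using Eigen::VectorXd;
using Eigen::MatrixXd;

SpMat make_Sigma(const std::vector<SpMat>& ZKZts, const VectorXd& h2s, double tol); // above

int main(){
  const int n = 4;
  SpMat I(n,n);
  I.setIdentity();
  std::vector<SpMat> ZKZts(1, I);
  VectorXd h2s(1);
  h2s << 0.5;
  // 0.5*I + (1 - 0.5)*I = I, so this should print the 4x4 identity
  std::cout << MatrixXd(make_Sigma(ZKZts, h2s, 1e-12)) << "\n";
}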
void PF::propagate(VectorXd &particles, VectorXd &weights, int t)
{
  for(int i = 0; i < N; i++) {
    double p = particles[i];
    boost::normal_distribution<> f_rng(p, f_std); // sample from f = q
    particles[i] = f_rng(rng);
    // Bootstrap filter: weight by the observation density evaluated at the
    // newly propagated particle (the original evaluated it at the old state p).
    boost::math::normal g(particles[i], g_std);
    weights[i] *= pdf(g, y[t]);
  }
  weights /= weights.sum();
}
// exponentiates and normalizes a vector (softmax with max-subtraction for stability)
void expAndNormalize(VectorXd& v) {
  if (v.size() == 0) return;
  double maxValue = v.maxCoeff();
  // Subtract the max before exponentiating so exp() cannot overflow
  v = (v.array() - maxValue).exp();
  double Z = v.sum();
  v /= Z;
}
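// Usage sketch for expAndNormalize (not from the original source): assumes
// the function above is in scope; the log-scores are illustrative and chosen
// large enough that a naive exp() would overflow.
#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

void expAndNormalize(VectorXd& v); // defined above

int main(){
  VectorXd logScores(3);
  logScores << 1000.0, 1001.0, 999.0;
  expAndNormalize(logScores);
  std::cout << logScores.transpose() << "\n"; // finite, sums to 1
}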
int argrand(const VectorXd &v) {
  // Draw an index with probability proportional to v (inverse-CDF sampling)
  double cutoff = v.sum() * rand() / (double)RAND_MAX;
  double cumSum = 0.0;
  for (int i = 0; i < v.size(); i++) {
    cumSum += v[i];
    if (cumSum >= cutoff) {
      return i;
    }
  }
  SVL_LOG(SVL_LOG_FATAL, "argrand: cumulative sum never reached cutoff");
  return -1;
}
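// Standalone sketch of the same inverse-CDF sampling with C++11 <random> in
// place of rand() (not part of the original library); argrandStd and the
// weights are illustrative.
#include <Eigen/Dense>
#include <iostream>
#include <random>
using Eigen::VectorXd;

int argrandStd(const VectorXd &v, std::mt19937 &gen) {
  std::uniform_real_distribution<double> unif(0.0, v.sum());
  double cutoff = unif(gen), cumSum = 0.0;
  for (int i = 0; i < v.size(); i++) {
    cumSum += v[i];
    if (cumSum >= cutoff) return i;
  }
  return v.size() - 1; // guard against floating-point shortfall
}

int main(){
  std::mt19937 gen(42);
  VectorXd w(3);
  w << 0.1, 0.6, 0.3;
  int counts[3] = {0, 0, 0};
  for (int s = 0; s < 10000; s++) counts[argrandStd(w, gen)]++;
  std::cout << counts[0] << " " << counts[1] << " " << counts[2] << "\n"; // ~1000 6000 3000
}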
// If there are two draws and the first has two tiles and the second -- three tiles,
// then sizes = c(2,3) and array = c(m_{1t_1},m_{1t_2},m_{2t_1},m_{2t_2},m_{2t_3})
bool dlmcell(const string &filename, const VectorXd &sizes, const vector<double> &array) {
  bool error = false;
  if (array.size() != sizes.sum()) { error = true; return error; }
  ofstream out(filename.c_str(), ofstream::out);
  if (!out.is_open()) { error = true; return error; }
  vector<double>::const_iterator it = array.begin();
  for ( int i = 0 ; i < sizes.size() ; i++ ) {
    for ( int j = 0 ; j < sizes(i) ; j++ ) {
      out << fixed << setprecision(6) << *it << " ";
      it++;
    }
    out << endl;
  }
  out.close();
  return error;
}
/* The Variational Bayes Expectation step for each group.
 *
 *  mutable: Group assignment probabilities, qZj
 *  returns: The complete-data (X,Z) free energy E[log p(X,Z)/q(Z)] for group j.
 *  throws: invalid_argument rethrown from other functions.
 */
template <class W, class C> double vbexpectation (
    const MatrixXd& Xj,         // Observations in group J
    const W& weights,           // Group Weight parameter distribution
    const vector<C>& clusters,  // Cluster parameter distributions
    MatrixXd& qZj,              // Observations to group mixture assignments
    const bool sparse           // Do sparse updates to groups
    )
{
  const int K = clusters.size(), Nj = Xj.rows();

  // Get log marginal weight likelihoods
  const ArrayXd E_logZ = weights.Elogweight();

  // Initialise and set K = 1 defaults for cluster counts
  ArrayXi Kful = ArrayXi::Zero(1), Kemp = ArrayXi::Zero(0);

  // Find empty clusters if sparse, otherwise use all clusters
  if ( (sparse == false) && (K > 1) )
    Kful = ArrayXi::LinSpaced(Sequential, K, 0, K-1);
  else if (sparse == true)
    arrfind((weights.getNk() >= ZEROCUTOFF), Kful, Kemp);

  const int nKful = Kful.size(), nKemp = Kemp.size();

  // Find Expectations of log joint observation probs -- allow sparse evaluation
  MatrixXd logqZj(Nj, nKful);
  for (int k = 0; k < nKful; ++k)
    logqZj.col(k) = E_logZ(Kful(k)) + clusters[Kful(k)].Eloglike(Xj).array();

  // Log normalisation constant of log observation likelihoods
  const VectorXd logZzj = logsumexp(logqZj);

  // Make sure qZ is the right size; this is a no-op if it is
  qZj.resize(Nj, K);

  // Normalise and compute responsibilities -- again allow sparse evaluation
  for (int k = 0; k < nKful; ++k)
    qZj.col(Kful(k)) = ((logqZj.col(k) - logZzj).array().exp()).matrix();

  // Empty cluster responsibilities
  for (int k = 0; k < nKemp; ++k)
    qZj.col(Kemp(k)).setZero();

  return -logZzj.sum();
}
GTEST_TEST(TestConvexHull, testRandomConvexCombinations) { // Generate a set of points, then find a random convex combination of those // points, and verify that it's correctly reported as being inside the // convex hull for (int i = 2; i < 50; ++i) { for (int j = 0; j < 500; ++j) { MatrixXd pts = MatrixXd::Random(2, i); VectorXd weights = VectorXd::Random(i); if (weights.minCoeff() < 0) { weights = weights.array() - weights.minCoeff(); // make sure they're all nonnegative } weights = weights.array() / weights.sum(); Vector2d q = pts * weights; EXPECT_TRUE(inConvexHull(pts, q, 1e-8)); } } }
void UpdaterMean::costsToWeights(const VectorXd& costs, string weighting_method, double eliteness, VectorXd& weights) const
{
  weights.resize(costs.size());

  if (weighting_method.compare("PI-BB")==0)
  {
    // PI^2 style weighting: continuous, cost exponentiation
    double h = eliteness; // In PI^2, eliteness parameter is known as "h"
    double range = costs.maxCoeff()-costs.minCoeff();
    if (range==0)
      weights.fill(1);
    else
      weights = (-h*(costs.array()-costs.minCoeff())/range).exp();
  }
  else if (weighting_method.compare("CMA-ES")==0 || weighting_method.compare("CEM")==0 )
  {
    // CMA-ES and CEM are rank-based, so we must first sort the costs, and then assign a
    // weight to each rank.
    VectorXd costs_sorted = costs;
    std::sort(costs_sorted.data(), costs_sorted.data()+costs_sorted.size());
    // In Python this is more elegant because we have argsort.
    //   indices = np.argsort(costs)
    // It is possible to do this with fancy lambda functions or std::pair in C++ too, but I don't
    // mind writing two for loops instead ;-)
    weights.fill(0.0);
    int mu = eliteness; // In CMA-ES, eliteness parameter is known as "mu"
    assert(mu<costs.size());
    for (int ii=0; ii<mu; ii++)
    {
      double cur_cost = costs_sorted[ii];
      for (int jj=0; jj<costs.size(); jj++)
      {
        // Note: ties in cost always match the first such index, so duplicated
        // costs leave the later duplicates with zero weight.
        if (costs[jj] == cur_cost)
        {
          if (weighting_method.compare("CEM")==0)
            weights[jj] = 1.0/mu;                  // CEM
          else
            weights[jj] = log(mu+0.5) - log(ii+1); // CMA-ES
          break;
        }
      }
    }
    // For debugging
    //MatrixXd print_mat(3,costs.size());
    //print_mat.row(0) = costs_sorted;
    //print_mat.row(1) = costs;
    //print_mat.row(2) = weights;
    //cout << print_mat << endl;
  }
  else
  {
    cout << __FILE__ << ":" << __LINE__ << ":WARNING: Unknown weighting method '" << weighting_method << "'. Calling with PI-BB weighting." << endl;
    costsToWeights(costs, "PI-BB", eliteness, weights);
    return;
  }

  // Relative standard deviation of the weights
  double mean = weights.mean();
  double stddev = sqrt((weights.array()-mean).pow(2).mean());
  double rel_std = stddev/mean;
  if (rel_std<1e-10)
  {
    // Special case: all costs are the same
    // Set same weights for all.
    weights.fill(1);
  }

  // Normalize weights
  weights = weights/weights.sum();
}
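// Standalone sketch of just the PI-BB branch above (piBBWeights is a
// hypothetical helper, not part of the original class); useful for checking
// the weighting by hand. The cost values and h = 10 are illustrative.
#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

// PI^2-style exponentiated-cost weights, normalized to sum to 1.
VectorXd piBBWeights(const VectorXd& costs, double h) {
  double range = costs.maxCoeff() - costs.minCoeff();
  if (range == 0) return VectorXd::Constant(costs.size(), 1.0/costs.size());
  VectorXd w = (-h*(costs.array() - costs.minCoeff())/range).exp();
  return w/w.sum();
}

int main(){
  VectorXd costs(4);
  costs << 3.0, 1.0, 2.0, 10.0;
  // The lowest cost dominates; the highest gets weight exp(-10)/Z
  std::cout << piBBWeights(costs, 10.0).transpose() << "\n";
}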
double ave(VectorXd y) { const double n(y.size()); const double ySum(y.sum()); const double yAve(ySum/n); return(yAve); }
double CalcEuclidean(const VectorXd &vec1, const VectorXd &vec2){
  // Euclidean distance. Note: the original returned the *squared* distance
  // (no sqrt), which contradicts the function name.
  VectorXd diff = vec1 - vec2;
  VectorXd sq = diff.array().square();
  return std::sqrt(sq.sum());
}
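// Usage sketch for CalcEuclidean (not from the original source): assumes the
// distance version above (with the sqrt) is in scope.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>
using Eigen::VectorXd;

double CalcEuclidean(const VectorXd &vec1, const VectorXd &vec2); // defined above

int main(){
  VectorXd a(2), b(2);
  a << 0.0, 0.0;
  b << 3.0, 4.0;
  std::cout << CalcEuclidean(a, b) << "\n"; // 5 for the 3-4-5 triangle
}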
Matrix3d msac( const Eigen::Matrix2Xd& pointsFrom, const Eigen::Matrix2Xd& pointsTo,
               int maxNumTrials, double confidence, double maxDistance )
{
    double threshold = maxDistance;
    int numPts = pointsFrom.cols();
    int idxTrial = 1;
    // Note: the adaptive trial-count update of MSAC is not implemented here;
    // 'confidence' is unused and the loop always runs maxNumTrials times.
    int numTrials = maxNumTrials;
    double maxDis = threshold * numPts;
    double bestDist = maxDis;
    Matrix3d bestT;
    bestT << 1, 0, 0, 0, 1, 0, 0, 0, 1;

    int index1;
    int index2;
    // Get two random, different numbers in [0:pointsFrom.cols()-1]
    std::uniform_int_distribution<int> distribution1( 0, pointsFrom.cols()-1 );
    std::uniform_int_distribution<int> distribution2( 0, pointsFrom.cols()-2 );

    while ( idxTrial <= numTrials )
    {
        // Get two random, different numbers in [0:pointsFrom.cols()-1]
        index1 = distribution1( msacGenerator );
        index2 = distribution2( msacGenerator );
        if ( index2 >= index1 )
            index2++;

        Vector2d indices( index1, index2 );

        /*std::cout << "indices: " << indices.transpose()
                  << " pointsFrom.cols: " << pointsFrom.cols()
                  << " pointsTo.cols: " << pointsTo.cols() << std::endl;*/

        // Get the transform computed from this set of points
        Matrix3d T = computeTform( pointsFrom, pointsTo, indices );

        VectorXd dis = evaluateTform( pointsFrom, pointsTo, T, threshold );
        double accDis = dis.sum();

        if ( accDis < bestDist )
        {
            bestDist = accDis;
            bestT = T;
        }
        idxTrial++;
    }

    // Re-derive an inlier threshold from the distances under the best model
    VectorXd dis = evaluateTform( pointsFrom, pointsTo, bestT, threshold );
    threshold *= dis.mean();

    int numInliers = 0;
    for ( int i = 0; i < dis.rows(); i++ )
        if ( dis(i) < threshold )
            numInliers++;

    VectorXd inliers( numInliers );
    int j = 0;
    for ( int i = 0; i < dis.rows(); i++ )
        if ( dis(i) < threshold )
            inliers(j++) = i;

    // Refit on all inliers; fall back to the identity if there are too few
    Matrix3d T;
    if ( numInliers >= 2 )
        T = computeTform( pointsFrom, pointsTo, inliers );
    else
        T << 1, 0, 0, 0, 1, 0, 0, 0, 1;

    return T;
}
double gini(const VectorXd &values) {
  // Gini impurity: 1 - sum of squared class frequencies
  double sum = values.sum();
  VectorXd fy = values / sum;
  double g = 1 - fy.array().square().sum();
  return g;
}
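// Usage sketch for gini (not from the original source): assumes the function
// above is in scope; the class counts are illustrative.
#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

double gini(const VectorXd &values); // defined above

int main(){
  VectorXd pure(3), mixed(3);
  pure  << 10, 0, 0; // a single class -> impurity 0
  mixed << 4, 3, 3;  // near-uniform  -> impurity 0.66
  std::cout << gini(pure) << " " << gini(mixed) << "\n";
}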
int main(int argc, char *argv[])
{
  using namespace Eigen;
  using namespace std;

  // Load a mesh in OFF format
  igl::readOFF("../shared/cow.off", V, F);

  // Compute Laplace-Beltrami operator: #V by #V
  igl::cotmatrix(V,F,L);

  // Alternative construction of same Laplacian
  SparseMatrix<double> G,K;
  // Gradient/Divergence
  igl::grad(V,F,G);
  // Diagonal per-triangle "mass matrix"
  VectorXd dblA;
  igl::doublearea(V,F,dblA);
  // Place areas along diagonal #dim times
  const auto & T = 1.*(dblA.replicate(3,1)*0.5).asDiagonal();
  // Laplacian K built as discrete divergence of gradient or equivalently
  // discrete Dirichlet energy Hessian
  K = -G.transpose() * T * G;
  cout<<"|K-L|: "<<(K-L).norm()<<endl;

  const auto &key_down = [](igl::Viewer &viewer,unsigned char key,int mod)->bool
  {
    switch(key)
    {
      case 'r':
      case 'R':
        U = V;
        break;
      case ' ':
      {
        // Recompute just mass matrix on each step
        SparseMatrix<double> M;
        igl::massmatrix(U,F,igl::MASSMATRIX_TYPE_BARYCENTRIC,M);
        // Solve (M-delta*L) U = M*U
        const auto & S = (M - 0.001*L);
        Eigen::SimplicialLLT<Eigen::SparseMatrix<double > > solver(S);
        assert(solver.info() == Eigen::Success);
        U = solver.solve(M*U).eval();
        // Compute centroid and subtract (also important for numerics)
        VectorXd dblA;
        igl::doublearea(U,F,dblA);
        double area = 0.5*dblA.sum();
        MatrixXd BC;
        igl::barycenter(U,F,BC);
        RowVector3d centroid(0,0,0);
        for(int i = 0;i<BC.rows();i++)
        {
          centroid += 0.5*dblA(i)/area*BC.row(i);
        }
        U.rowwise() -= centroid;
        // Normalize to unit surface area (important for numerics)
        U.array() /= sqrt(area);
        break;
      }
      default:
        return false;
    }
    // Send new positions, update normals, recenter
    viewer.data.set_vertices(U);
    viewer.data.compute_normals();
    viewer.core.align_camera_center(U,F);
    return true;
  };

  // Use original normals as pseudo-colors
  MatrixXd N;
  igl::per_vertex_normals(V,F,N);
  MatrixXd C = N.rowwise().normalized().array()*0.5+0.5;

  // Initialize smoothing with base mesh
  U = V;
  viewer.data.set_mesh(U, F);
  viewer.data.set_colors(C);
  viewer.callback_key_down = key_down;

  cout<<"Press [space] to smooth."<<endl;
  cout<<"Press [r] to reset."<<endl;
  return viewer.launch();
}
void CLBPInference::infer(CGraph &graph,
                          map<size_t,VectorXd> &nodeBeliefs,
                          map<size_t,MatrixXd> &edgeBeliefs,
                          double &logZ)
{
    //
    // Algorithm workflow:
    //   1. Compute the messages passed
    //   2. Compute node beliefs
    //   3. Compute edge beliefs
    //   4. Compute logZ
    //

    nodeBeliefs.clear();
    edgeBeliefs.clear();

    const vector<CNodePtr> nodes = graph.getNodes();
    const vector<CEdgePtr> edges = graph.getEdges();
    multimap<size_t,CEdgePtr> edges_f = graph.getEdgesF();

    size_t N_nodes = nodes.size();
    size_t N_edges = edges.size();

    //
    // 1. Compute the messages passed
    //

    vector<vector<VectorXd> > messages;
    bool maximize = false;

    messagesLBP( graph, m_options, messages, maximize );

    //
    // 2. Compute node beliefs
    //

    for ( size_t nodeIndex = 0; nodeIndex < N_nodes; nodeIndex++ )
    {
        const CNodePtr nodePtr = graph.getNode( nodeIndex );
        size_t nodeID = nodePtr->getID();
        VectorXd nodePotPlusIncMsg = nodePtr->getPotentials( m_options.considerNodeFixedValues );

        NEIGHBORS_IT neighbors = edges_f.equal_range(nodeID);

        // Get the messages from all the neighbors, and multiply them with the node potential
        for ( multimap<size_t,CEdgePtr>::iterator itNeigbhor = neighbors.first;
              itNeigbhor != neighbors.second;
              itNeigbhor++ )
        {
            CEdgePtr edgePtr( (*itNeigbhor).second );
            size_t edgeIndex = graph.getEdgeIndex( edgePtr->getID() );

            if ( !edgePtr->getNodePosition( nodeID ) ) // nodeID is the first node in the edge
                nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 1 ]);
            else // nodeID is the second node in the edge
                nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 0 ]);
        }

        // Normalize
        nodePotPlusIncMsg = nodePotPlusIncMsg / nodePotPlusIncMsg.sum();

        nodeBeliefs[ nodeID ] = nodePotPlusIncMsg;
    }

    //
    // 3. Compute edge beliefs
    //

    for ( size_t edgeIndex = 0; edgeIndex < N_edges; edgeIndex++ )
    {
        CEdgePtr edgePtr = edges[edgeIndex];
        size_t edgeID = edgePtr->getID();

        size_t ID1, ID2;
        edgePtr->getNodesID( ID1, ID2 );

        MatrixXd edgePotentials = edgePtr->getPotentials();
        MatrixXd edgeBelief = edgePotentials;

        VectorXd &message1To2 = messages[edgeIndex][0];
        VectorXd &message2To1 = messages[edgeIndex][1];

        // Divide the incoming message back out of each node belief
        VectorXd node1Belief = nodeBeliefs[ ID1 ].cwiseQuotient( message2To1 );
        VectorXd node2Belief = nodeBeliefs[ ID2 ].cwiseQuotient( message1To2 );

        MatrixXd node1BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node1BeliefMatrix(row,col) = node1Belief(row);

        edgeBelief = edgeBelief.cwiseProduct( node1BeliefMatrix );

        MatrixXd node2BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node2BeliefMatrix(row,col) = node2Belief(col);

        edgeBelief = edgeBelief.cwiseProduct( node2BeliefMatrix );

        // Normalize
        edgeBelief = edgeBelief / edgeBelief.sum();

        edgeBeliefs[ edgeID ] = edgeBelief;
    }

    //
    // 4. Compute logZ
    //

    double energyNodes  = 0;
    double energyEdges  = 0;
    double entropyNodes = 0;
    double entropyEdges = 0;

    // Compute energy and entropy terms from nodes
    for ( size_t nodeIndex = 0; nodeIndex < nodes.size(); nodeIndex++ )
    {
        CNodePtr nodePtr = nodes[ nodeIndex ];
        size_t nodeID = nodePtr->getID();
        size_t N_Neighbors = graph.getNumberOfNodeNeighbors( nodeID );

        // Useful computations and shortcuts
        VectorXd &nodeBelief = nodeBeliefs[nodeID];
        VectorXd logNodeBelief = nodeBeliefs[nodeID].array().log();
        VectorXd nodePotentials = nodePtr->getPotentials( m_options.considerNodeFixedValues );
        VectorXd logNodePotentials = nodePotentials.array().log();

        // Accumulate sum_x b(x) log b(x), weighted by the number of neighbors
        // (despite the variable name, this is the entropy-type term)
        energyNodes  += N_Neighbors*( nodeBelief.cwiseProduct( logNodeBelief ).sum() );
        // Accumulate sum_x b(x) log phi(x) (the energy-type term)
        entropyNodes += N_Neighbors*( nodeBelief.cwiseProduct( logNodePotentials ).sum() );
    }

    // Compute energy and entropy terms from edges
    for ( size_t edgeIndex = 0; edgeIndex < N_edges; edgeIndex++ )
    {
        CEdgePtr edgePtr = edges[ edgeIndex ];
        size_t edgeID = edgePtr->getID();

        // Useful computations and shortcuts
        MatrixXd &edgeBelief = edgeBeliefs[ edgeID ];
        MatrixXd logEdgeBelief = edgeBelief.array().log();
        MatrixXd &edgePotentials = edgePtr->getPotentials();
        MatrixXd logEdgePotentials = edgePotentials.array().log();

        // Accumulate sum_xy b(x,y) log b(x,y) (entropy-type term)
        energyEdges  += edgeBelief.cwiseProduct( logEdgeBelief ).sum();
        // Accumulate sum_xy b(x,y) log psi(x,y) (energy-type term)
        entropyEdges += edgeBelief.cwiseProduct( logEdgePotentials ).sum();
    }

    // Final Bethe free energy
    double BethefreeEnergy = ( energyNodes - energyEdges ) - ( entropyNodes - entropyEdges );

    // Compute logZ
    logZ = - BethefreeEnergy;
}
size_t UPGMpp::messagesLBP(CGraph &graph,
                           TInferenceOptions &options,
                           vector<vector<VectorXd> > &messages,
                           bool maximize,
                           const vector<size_t> &tree)
{
    const vector<CNodePtr> nodes = graph.getNodes();
    const vector<CEdgePtr> edges = graph.getEdges();
    multimap<size_t,CEdgePtr> edges_f = graph.getEdgesF();

    size_t N_nodes = nodes.size();
    size_t N_edges = edges.size();

    bool is_tree = ( tree.size() > 0 );

    //graph.computePotentials();

    //
    // Build the messages structure
    //

    double totalSumOfMsgs = 0;

    if ( !messages.size() )
        messages.resize( N_edges );

    for ( size_t i = 0; i < N_edges; i++ )
    {
        if ( !messages[i].size() )
        {
            messages[i].resize(2);

            size_t ID1, ID2;
            edges[i]->getNodesID(ID1,ID2);

            // Messages from the first node of the edge to the second one, so the size of
            // the message has to be the same as the number of classes of the second node.
            size_t N_classes = graph.getNodeWithID( ID2 )->getPotentials( options.considerNodeFixedValues ).rows();
            messages[i][0].resize( N_classes );
            messages[i][0].fill(1.0/N_classes);

            // Just the opposite of the above.
            N_classes = graph.getNodeWithID( ID1 )->getPotentials( options.considerNodeFixedValues ).rows();
            messages[i][1].resize( N_classes );
            messages[i][1].fill(1.0/N_classes);
        }

        totalSumOfMsgs += messages[i][0].rows() + messages[i][1].rows();
    }

    // For Residual Belief Propagation, keep a copy of the previous messages
    vector<vector<VectorXd> > previousMessages;

    if ( options.particularS["order"] == "RBP" )
    {
        previousMessages = messages;
        for ( size_t i = 0; i < previousMessages.size(); i++ )
        {
            previousMessages[i][0].fill(0);
            previousMessages[i][1].fill(0);
        }
    }

    //
    // Iterate until convergence or a certain maximum number of iterations is reached
    //

    size_t iteration;

    for ( iteration = 0; iteration < options.maxIterations; iteration++ )
    {
        // Variables used by Residual Belief Propagation
        int edgeWithMaxDiffIndex = -1;
        VectorXd associatedMessage;
        bool from1to2 = true;
        double maxDifference = -1;

        //
        // Iterate over all the nodes
        //
        for ( size_t nodeIndex = 0; nodeIndex < N_nodes; nodeIndex++ )
        {
            const CNodePtr nodePtr = graph.getNode( nodeIndex );
            size_t nodeID = nodePtr->getID();

            // If we are calibrating a tree and the node is not a member of it,
            // we don't have to update its messages.
            if ( is_tree && ( std::find(tree.begin(), tree.end(), nodeID ) == tree.end() ) )
                continue;

            NEIGHBORS_IT neighbors = edges_f.equal_range(nodeID);

            //
            // Send a message to each neighbor
            //
            for ( multimap<size_t,CEdgePtr>::iterator itNeigbhor = neighbors.first;
                  itNeigbhor != neighbors.second;
                  itNeigbhor++ )
            {
                VectorXd nodePotPlusIncMsg = nodePtr->getPotentials( options.considerNodeFixedValues );

                size_t neighborID;
                size_t ID1, ID2;

                CEdgePtr edgePtr( (*itNeigbhor).second );
                edgePtr->getNodesID(ID1,ID2);
                neighborID = ( ID1 == nodeID ) ? ID2 : ID1;

                // If we are calibrating a tree and the neighbor node is not a
                // member of it, we don't have to update its messages.
                if ( is_tree && ( std::find(tree.begin(), tree.end(), neighborID ) == tree.end() ) )
                    continue;

                //
                // Compute the message from the current node as the product of all the
                // incoming messages except the one from the current neighbor,
                // times the node potential of the current node.
                //
                for ( multimap<size_t,CEdgePtr>::iterator itNeigbhor2 = neighbors.first;
                      itNeigbhor2 != neighbors.second;
                      itNeigbhor2++ )
                {
                    size_t ID11, ID12;
                    CEdgePtr edgePtr2( (*itNeigbhor2).second );
                    edgePtr2->getNodesID(ID11,ID12);
                    size_t edgeIndex = graph.getEdgeIndex( edgePtr2->getID() );

                    // Skip the edge that involves the current neighbor
                    if ( ( neighborID != ID11 ) && ( neighborID != ID12 ) )
                    {
                        if ( nodeID == ID11 )
                            nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 1 ]);
                        else // nodeID == ID12
                            nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 0 ]);
                    }
                }

                //
                // Take also the potential between the two nodes
                //
                MatrixXd edgePotentials;

                if ( nodeID != ID1 )
                    edgePotentials = edgePtr->getPotentials();
                else
                    edgePotentials = edgePtr->getPotentials().transpose();

                VectorXd newMessage;
                size_t edgeIndex = graph.getEdgeIndex( edgePtr->getID() );

                if ( !maximize )
                {
                    // Sum-product: multiply and marginalize, then normalize
                    newMessage = edgePotentials * nodePotPlusIncMsg;

                    if (newMessage.sum())
                        newMessage = newMessage / newMessage.sum();
                }
                else
                {
                    // Max-product: take the row-wise maximum instead of the sum
                    if ( nodeID == ID1 )
                        newMessage.resize(messages[ edgeIndex ][0].rows());
                    else
                        newMessage.resize(messages[ edgeIndex ][1].rows());

                    for ( size_t row = 0; row < edgePotentials.rows(); row++ )
                    {
                        // lowest() rather than min(): min() is the smallest
                        // *positive* double and is not a valid initial maximum.
                        double maxRowValue = std::numeric_limits<double>::lowest();

                        for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                        {
                            double value = edgePotentials(row,col)*nodePotPlusIncMsg(col);
                            if ( value > maxRowValue )
                                maxRowValue = value;
                        }
                        newMessage(row) = maxRowValue;
                    }

                    // Normalize new message
                    if (newMessage.sum())
                        newMessage = newMessage / newMessage.sum();
                }

                //
                // Set the message!
                //

                double smoothing = options.particularD["smoothing"];

                if ( smoothing != 0 )
                {
                    // Damped update: convex combination of the new and the old message.
                    // (The original added the full new message to (1-smoothing) times
                    // the old one, which also leaves the result unnormalized.)
                    if ( nodeID == ID1 )
                        newMessage = smoothing * newMessage + (1-smoothing) * messages[ edgeIndex ][0];
                    else
                        newMessage = smoothing * newMessage + (1-smoothing) * messages[ edgeIndex ][1];
                }

                // If residual belief propagation is activated, just check whether
                // newMessage has the highest residual so far. Otherwise, set the
                // new message as the current one.
                if ( options.particularS["order"] == "RBP" )
                {
                    if ( nodeID == ID1 )
                    {
                        VectorXd differences = messages[edgeIndex][0] - newMessage;
                        double difference = differences.cwiseAbs().sum();

                        if ( difference > maxDifference )
                        {
                            from1to2 = true;
                            edgeWithMaxDiffIndex = edgeIndex;
                            maxDifference = difference;
                            associatedMessage = newMessage;
                        }
                    }
                    else
                    {
                        VectorXd differences = messages[edgeIndex][1] - newMessage;
                        double difference = differences.cwiseAbs().sum();

                        if ( difference > maxDifference )
                        {
                            from1to2 = false;
                            edgeWithMaxDiffIndex = edgeIndex;
                            maxDifference = difference;
                            associatedMessage = newMessage;
                        }
                    }
                }
                else
                {
                    if ( nodeID == ID1 )
                        messages[ edgeIndex ][0] = newMessage;
                    else
                        messages[ edgeIndex ][1] = newMessage;
                }
            }
        } // Nodes

        // '!=' rather than the original '=!', which *assigned* !(-1) to the
        // index and made the condition always false
        if ( options.particularS["order"] == "RBP" && ( edgeWithMaxDiffIndex != -1 ) )
        {
            if ( from1to2 )
                messages[ edgeWithMaxDiffIndex ][0] = associatedMessage;
            else
                messages[ edgeWithMaxDiffIndex ][1] = associatedMessage;
        }

        //
        // Check convergence
        //

        double newTotalSumOfMsgs = 0;
        for ( size_t i = 0; i < N_edges; i++ )
        {
            newTotalSumOfMsgs += messages[i][0].sum() + messages[i][1].sum();
        }

        if ( std::abs( totalSumOfMsgs - newTotalSumOfMsgs ) < options.convergency )
            break;

        totalSumOfMsgs = newTotalSumOfMsgs;
    } // Iterations

    return 1;
}
double Sampler::fullIntegrate( const VectorXd &distribution, double delta)
{
    // Riemann-sum approximation of the integral on a uniform grid of spacing delta
    return distribution.sum()*delta;
}
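// Standalone sketch of the same Riemann-sum integration (the Sampler class is
// not reconstructed here); the grid bounds and size are illustrative.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>
using Eigen::VectorXd;

int main(){
  const int n = 1000;
  const double a = -5.0, b = 5.0, delta = (b - a)/n;
  const double PI = 3.14159265358979323846;
  VectorXd pdf(n);
  for (int i = 0; i < n; i++) {
    double x = a + (i + 0.5)*delta;                // midpoint rule
    pdf[i] = std::exp(-0.5*x*x)/std::sqrt(2.0*PI); // standard normal density
  }
  std::cout << pdf.sum()*delta << "\n"; // ~1, exactly what fullIntegrate computes
}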
void CTRPBPInference::infer(CGraph &graph,
                            map<size_t,VectorXd> &nodeBeliefs,
                            map<size_t,MatrixXd> &edgeBeliefs,
                            double &logZ)
{
    //
    // Algorithm workflow:
    //   1. Create spanning trees
    //   2. Compute the messages passed in each tree until convergence
    //   3. Compute node beliefs
    //   4. Compute edge beliefs
    //   5. Compute logZ
    //

    nodeBeliefs.clear();
    edgeBeliefs.clear();

    const vector<CNodePtr> nodes = graph.getNodes();
    const vector<CEdgePtr> edges = graph.getEdges();
    multimap<size_t,CEdgePtr> edges_f = graph.getEdgesF();

    size_t N_nodes = nodes.size();
    size_t N_edges = edges.size();

    //
    // 1. Create spanning trees
    //

    bool allNodesAdded = false;
    vector<vector<size_t > > v_trees;
    vector<bool> v_addedNodes(N_nodes,false);
    map<size_t,size_t> addedNodesMap;

    for (size_t i = 0; i < N_nodes; i++)
        addedNodesMap[ nodes[i]->getID() ] = i;

    while (!allNodesAdded)
    {
        allNodesAdded = true;

        vector<size_t> tree;
        getSpanningTree( graph, tree );

        // Check that the tree is not empty
        if ( tree.size() )
            v_trees.push_back( tree );

        cout << "Tree: ";

        for ( size_t i_node = 0; i_node < tree.size(); i_node++ )
        {
            v_addedNodes[ addedNodesMap[tree[i_node]] ] = true;
            cout << tree[i_node] << " ";
        }

        cout << endl;

        for ( size_t i_node = 0; i_node < N_nodes; i_node++ )
            if ( !v_addedNodes[i_node] )
            {
                allNodesAdded = false;
                break;
            }
    }

    //
    // 2. Compute the messages passed in each tree until convergence
    //

    vector<vector<VectorXd> > messages;
    bool maximize = false;

    double totalSumOfMsgs = std::numeric_limits<double>::max();

    size_t iteration;

    for ( iteration = 0; iteration < m_options.maxIterations; iteration++ )
    {
        for ( size_t i_tree = 0; i_tree < v_trees.size(); i_tree++ )
            messagesLBP( graph, m_options, messages, maximize, v_trees[i_tree] );

        double newTotalSumOfMsgs = 0;
        for ( size_t i = 0; i < N_edges; i++ )
        {
            newTotalSumOfMsgs += messages[i][0].sum() + messages[i][1].sum();
        }

        if ( std::abs( totalSumOfMsgs - newTotalSumOfMsgs ) < m_options.convergency )
            break;

        totalSumOfMsgs = newTotalSumOfMsgs;
    }

    //
    // 3. Compute node beliefs
    //

    for ( size_t nodeIndex = 0; nodeIndex < N_nodes; nodeIndex++ )
    {
        const CNodePtr nodePtr = graph.getNode( nodeIndex );
        size_t nodeID = nodePtr->getID();
        VectorXd nodePotPlusIncMsg = nodePtr->getPotentials( m_options.considerNodeFixedValues );

        NEIGHBORS_IT neighbors = edges_f.equal_range(nodeID);

        // Get the messages from all the neighbors, and multiply them with the node potential
        for ( multimap<size_t,CEdgePtr>::iterator itNeigbhor = neighbors.first;
              itNeigbhor != neighbors.second;
              itNeigbhor++ )
        {
            CEdgePtr edgePtr( (*itNeigbhor).second );
            size_t edgeIndex = graph.getEdgeIndex( edgePtr->getID() );

            if ( !edgePtr->getNodePosition( nodeID ) ) // nodeID is the first node in the edge
                nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 1 ]);
            else // nodeID is the second node in the edge
                nodePotPlusIncMsg = nodePotPlusIncMsg.cwiseProduct(messages[ edgeIndex ][ 0 ]);
        }

        // Normalize
        nodePotPlusIncMsg = nodePotPlusIncMsg / nodePotPlusIncMsg.sum();

        nodeBeliefs[ nodeID ] = nodePotPlusIncMsg;
    }

    //
    // 4. Compute edge beliefs
    //

    for ( size_t edgeIndex = 0; edgeIndex < N_edges; edgeIndex++ )
    {
        CEdgePtr edgePtr = edges[edgeIndex];
        size_t edgeID = edgePtr->getID();

        size_t ID1, ID2;
        edgePtr->getNodesID( ID1, ID2 );

        MatrixXd edgePotentials = edgePtr->getPotentials();
        MatrixXd edgeBelief = edgePotentials;

        VectorXd &message1To2 = messages[edgeIndex][0];
        VectorXd &message2To1 = messages[edgeIndex][1];

        // Divide the incoming message back out of each node belief
        VectorXd node1Belief = nodeBeliefs[ ID1 ].cwiseQuotient( message2To1 );
        VectorXd node2Belief = nodeBeliefs[ ID2 ].cwiseQuotient( message1To2 );

        MatrixXd node1BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node1BeliefMatrix(row,col) = node1Belief(row);

        edgeBelief = edgeBelief.cwiseProduct( node1BeliefMatrix );

        MatrixXd node2BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node2BeliefMatrix(row,col) = node2Belief(col);

        edgeBelief = edgeBelief.cwiseProduct( node2BeliefMatrix );

        // Normalize
        edgeBelief = edgeBelief / edgeBelief.sum();

        edgeBeliefs[ edgeID ] = edgeBelief;
    }

    //
    // 5. Compute logZ
    //

    double energyNodes  = 0;
    double energyEdges  = 0;
    double entropyNodes = 0;
    double entropyEdges = 0;

    // Compute energy and entropy terms from nodes
    for ( size_t nodeIndex = 0; nodeIndex < nodes.size(); nodeIndex++ )
    {
        CNodePtr nodePtr = nodes[ nodeIndex ];
        size_t nodeID = nodePtr->getID();
        size_t N_Neighbors = graph.getNumberOfNodeNeighbors( nodeID );

        // Useful computations and shortcuts
        VectorXd &nodeBelief = nodeBeliefs[nodeID];
        VectorXd logNodeBelief = nodeBeliefs[nodeID].array().log();
        VectorXd nodePotentials = nodePtr->getPotentials( m_options.considerNodeFixedValues );
        VectorXd logNodePotentials = nodePotentials.array().log();

        // Accumulate sum_x b(x) log b(x), weighted by the number of neighbors
        // (despite the variable name, this is the entropy-type term)
        energyNodes  += N_Neighbors*( nodeBelief.cwiseProduct( logNodeBelief ).sum() );
        // Accumulate sum_x b(x) log phi(x) (the energy-type term)
        entropyNodes += N_Neighbors*( nodeBelief.cwiseProduct( logNodePotentials ).sum() );
    }

    // Compute energy and entropy terms from edges
    for ( size_t edgeIndex = 0; edgeIndex < N_edges; edgeIndex++ )
    {
        CEdgePtr edgePtr = edges[ edgeIndex ];
        size_t edgeID = edgePtr->getID();

        // Useful computations and shortcuts
        MatrixXd &edgeBelief = edgeBeliefs[ edgeID ];
        MatrixXd logEdgeBelief = edgeBelief.array().log();
        MatrixXd &edgePotentials = edgePtr->getPotentials();
        MatrixXd logEdgePotentials = edgePotentials.array().log();

        // Accumulate sum_xy b(x,y) log b(x,y) (entropy-type term)
        energyEdges  += edgeBelief.cwiseProduct( logEdgeBelief ).sum();
        // Accumulate sum_xy b(x,y) log psi(x,y) (energy-type term)
        entropyEdges += edgeBelief.cwiseProduct( logEdgePotentials ).sum();
    }

    // Final Bethe free energy
    double BethefreeEnergy = ( energyNodes - energyEdges ) - ( entropyNodes - entropyEdges );

    // Compute logZ
    logZ = - BethefreeEnergy;
}