VectorXf EMclustering::logsumexp(MatrixXf x, int dim)
{
    // Numerically stable log(sum(exp(x))) across each row.
    // Note: dim is currently unused; the reduction is always row-wise.
    VectorXf y = x.rowwise().maxCoeff();    // per-row maximum
    x = x.colwise() - y;                    // shift so each row's max is 0
    VectorXf sums = x.array().exp().rowwise().sum();
    VectorXf s = y.array() + sums.array().log();

    // If a row produced a non-finite result (e.g. all entries were -inf),
    // fall back to the row maximum.
    for (int i = 0; i < s.size(); i++) {
        if (!std::isfinite(s(i)))
            s(i) = y(i);
    }
    return s;
}
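// Usage sketch (not from the original source): illustrates why the max-shift
// above matters. exp(1000) overflows float, yet logsumexp stays finite.
// Assumes an EMclustering instance `em`; the matrix values are made up.
void logsumexp_demo(EMclustering &em)
{
    MatrixXf x(2, 3);
    x << 1000.f, 1000.f, 1000.f,   // naive log(exp(x).sum()) would overflow
           -1.f,    0.f,    1.f;
    VectorXf s = em.logsumexp(x, /*dim=*/1);
    std::cout << s.transpose() << std::endl;  // ~ [1000 + log(3), 1.4076]
}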
MatrixXf Sphere::make_initial_simplex(const VectorXf &pars, float size)
{
    /*
     * Make the initial tetrahedron
     */
    int npar = pars.size();
    MatrixXf simplex = MatrixXf::Zero(npar+1, npar);

    simplex.rowwise() += pars.transpose();
    for (int k = 1; k < npar+1; k++)
        simplex(k, k-1) += size;

    return simplex;
}
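// Usage sketch (not from the original source): for an npar-dimensional start
// point the routine returns an (npar+1) x npar matrix of simplex vertices:
// row 0 is the start point itself, row k is the start point displaced by
// `size` along coordinate k-1. A Sphere instance `sph` is assumed.
void simplex_demo(Sphere &sph)
{
    VectorXf r0(3);
    r0 << 0.f, 0.f, 0.04f;                            // initial center guess
    MatrixXf simplex = sph.make_initial_simplex(r0, 4e-3f);
    std::cout << simplex << std::endl;                // 4 rows, 3 columns
}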
float Sphere::fit_eval(const VectorXf &fitpar, const void *user_data)
{
    /*
     * Calculate the cost function value
     * Optimize for the radius inside here
     */
    const fitUserNew user = (fitUserNew)user_data;
    const VectorXf& r0 = fitpar;

    MatrixXf diff = user->rr.rowwise() - r0.transpose();  // point - center
    VectorXf one  = diff.rowwise().norm();                // distances d_i
    float sum  = one.sum();
    float sum2 = one.dot(one);
    float F = sum2 - sum*sum/user->rr.rows();

    if (user->report)
        std::cout << "r0: " << 1000*r0[0]
                  << ", r1: " << 1000*r0[1]
                  << ", r2: " << 1000*r0[2]
                  << "; R: " << 1000*sum/user->rr.rows()
                  << "; fval: " << F << std::endl;

    return F;
}
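// Note on the cost: with d_i = |r_i - r0| over the N rows of user->rr,
//   F = sum(d_i^2) - (sum d_i)^2 / N = N * Var(d),
// so minimizing F over r0 finds the center that makes the point-to-center
// distances as uniform as possible; opt_rad below then reads the radius off
// as their mean.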
float Sphere::opt_rad(const VectorXf &r0, const fitUserNew user)
{
    // Best-fit radius for a given center: the mean distance to the points.
    MatrixXf diff = user->rr.rowwise() - r0.transpose();
    return diff.rowwise().norm().mean();
}
void Sphere::calculate_cm_ave_dist(const MatrixXf &rr, VectorXf &cm, float &avep)
{
    // Center of mass of the points and their average distance to it:
    // a cheap initial guess for the sphere center and radius.
    cm = rr.colwise().mean();
    MatrixXf diff = rr.rowwise() - cm.transpose();
    avep = diff.rowwise().norm().mean();
}
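// Usage sketch (not from the original source): shows how the helpers fit
// together. Since the definition of fitUserNew is not shown here, the cost
// is evaluated by hand with the same formula fit_eval uses; a Sphere
// instance `sph` and synthetic points are assumed.
void sphere_fit_demo(Sphere &sph)
{
    MatrixXf rr = MatrixXf::Random(200, 3) * 0.1f;   // synthetic points (m)

    VectorXf cm;
    float avep = 0.f;
    sph.calculate_cm_ave_dist(rr, cm, avep);         // start center and radius

    // Same quantity fit_eval computes, evaluated at the start point:
    VectorXf d = (rr.rowwise() - cm.transpose()).rowwise().norm();
    float F = d.dot(d) - d.sum()*d.sum()/rr.rows();
    std::cout << "start cost: " << F
              << ", start radius: " << avep << std::endl;
}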
void MapOptimizer::constrain_direction (MatrixXf & dJ, float tol) const
{
  // Enforce the simultaneous constraints
  //
  //   /\x. sum y. dJ(y,x) = 0
  //   /\y. sum x. dJ(y,x) = 0
  //
  // We combine the two constraints by iteratively weakly enforcing both:
  // Let Px,Py project to the feasible subspaces for constraints 1,2, resp.
  // Each projection has eigenvalues in {0,1}.
  // We approximate the desired projection Pxy as a linear combination of Px,Py
  //
  //   Pxy' = 1 - alpha ((1-Px) + (1-Py))
  //
  // which has eigenvalues in {1} u [1 - alpha, 1 - 2 alpha].
  // Hence Pxy = lim n->infty Pxy'^n, where convergence rate depends on alpha.
  // The optimal alpha is 2/3, yielding Pxy' eigenvalues in {1} u [-1/3,1/3],
  // and resulting in project_scale = -alpha below.

  if (logging) LOG(" constraining direction");

  const size_t X = dom.size;
  const size_t Y = cod.size;

  const MatrixXf & J = m_joint;
  const VectorXf & sum_y_J = m_dom_prior;
  const VectorXf & sum_x_J = m_cod_prior;
  const float sum_xy_J = m_cod_prior.sum();

  VectorXf sum_y_dJ(J.cols());
  VectorXf sum_x_dJ(J.rows());

  // this is iterative, so we hand-optimize by merging loops
  const float * restrict J_ = J.data();
  const float * restrict sum_y_J_ = sum_y_J.data();
  const float * restrict sum_x_J_ = sum_x_J.data();
  float * restrict dJ_ = dJ.data();
  float * restrict project_y_ = sum_y_dJ.data();
  float * restrict project_x_ = sum_x_dJ.data();

  const float project_scale = -2/3.0;

  Vector<float> accum_x_dJ(Y);
  float * restrict accum_x_dJ_ = accum_x_dJ;

  // accumulate first projection
  accum_x_dJ.zero();
  for (size_t x = 0; x < X; ++x) {
    const float * restrict dJ_x_ = dJ_ + Y * x;

    float accum_y_dJ = 0;
    for (size_t y = 0; y < Y; ++y) {
      float dJ_xy = dJ_x_[y];
      accum_y_dJ += dJ_xy;
      accum_x_dJ_[y] += dJ_xy;
    }
    project_y_[x] = project_scale * accum_y_dJ / sum_y_J_[x];
  }
  for (size_t y = 0; y < Y; ++y) {
    project_x_[y] = project_scale * accum_x_dJ_[y] / sum_x_J_[y];
    accum_x_dJ_[y] = 0;
  }

  // apply previous projection and accumulate next projection
  for (size_t iter = 0; iter < 100; ++iter) {
    float error = 0;

    for (size_t x = 0; x < X; ++x) {
      const float * restrict J_x_ = J_ + Y * x;
      float * restrict dJ_x_ = dJ_ + Y * x;

      float accum_y_dJ = 0;
      for (size_t y = 0; y < Y; ++y) {
        float dJ_xy = dJ_x_[y] += J_x_[y] * (project_x_[y] + project_y_[x]);
        accum_y_dJ += dJ_xy;
        accum_x_dJ_[y] += dJ_xy;
      }
      project_y_[x] = project_scale * accum_y_dJ / sum_y_J_[x];
      imax(error, max(-accum_y_dJ, accum_y_dJ));
    }

    for (size_t y = 0; y < Y; ++y) {
      float accum_x_dJ_y = accum_x_dJ_[y];
      accum_x_dJ_[y] = 0;
      project_x_[y] = project_scale * accum_x_dJ_y / sum_x_J_[y];
      imax(error, max(-accum_x_dJ_y, accum_x_dJ_y));
    }

    if (error < tol) {
      if (logging) {
        LOG("  after " << (1+iter) << " iterations, error < " << error);
      }
      break;
    }
  }

  // apply final projection
  for (size_t x = 0; x < X; ++x) {
    const float * restrict J_x_ = J_ + Y * x;
    float * restrict dJ_x_ = dJ_ + Y * x;

    for (size_t y = 0; y < Y; ++y) {
      dJ_x_[y] += J_x_[y] * (project_x_[y] + project_y_[x]);
    }
  }

  if (debug) {
    sum_y_dJ = dJ.colwise().sum();
    sum_x_dJ = dJ.rowwise().sum();
    float sum_xy_dJ = sum_x_dJ.sum();

    DEBUG("max constraint errors = "
        << sqrt(sum_x_dJ.array().square().maxCoeff()) << ", "
        << sqrt(sum_y_dJ.array().square().maxCoeff()) << ", "
        << sum_xy_dJ);

    sum_y_dJ.array() /= sum_y_J.array();
    sum_x_dJ.array() /= sum_x_J.array();
    sum_xy_dJ /= sum_xy_J;

    DEBUG("max relative constraint errors = "
        << sqrt(sum_x_dJ.array().square().maxCoeff()) << ", "
        << sqrt(sum_y_dJ.array().square().maxCoeff()) << ", "
        << sum_xy_dJ);

    DEBUG("max(|dJ|) = " << dJ.array().abs().maxCoeff()
        << ", rms(dJ) = " << sqrt(dJ.array().square().mean()));
    DEBUG("max(J) / min(J) = " << (J.maxCoeff() / J.minCoeff()));
    DEBUG("max(sum x. J) / min(sum x. J) = "
        << (sum_x_J.maxCoeff() / sum_x_J.minCoeff()));
    DEBUG("max(sum y. J) / min(sum y. J) = "
        << (sum_y_J.maxCoeff() / sum_y_J.minCoeff()));
  }
}
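// Illustrative restatement (not the production path): the same damped
// alternating projection written with whole-matrix Eigen ops, handy for
// checking the hand-optimized loops above on small inputs. With alpha = 2/3
// the contracting eigenvalues lie in [-1/3, 1/3], so the row and column sums
// of dJ shrink by at least 3x per sweep. The masses are taken here as the
// row/column sums of J; the production code uses its stored priors.
void constrain_direction_sketch (const MatrixXf & J, MatrixXf & dJ,
                                 float tol = 1e-6f, int max_iter = 100)
{
  const float alpha = 2/3.0f;
  const VectorXf sum_row_J = J.rowwise().sum();     // fixed row masses
  const RowVectorXf sum_col_J = J.colwise().sum();  // fixed column masses

  for (int iter = 0; iter < max_iter; ++iter) {
    // J-weighted corrections that damp the current row/column sums of dJ
    VectorXf pr = (-alpha * (dJ.rowwise().sum().array()
                             / sum_row_J.array())).matrix();
    RowVectorXf pc = (-alpha * (dJ.colwise().sum().array()
                                / sum_col_J.array())).matrix();
    dJ.array() += J.array()
                * (pr.replicate(1, J.cols()) + pc.replicate(J.rows(), 1)).array();

    float error = std::max(dJ.rowwise().sum().cwiseAbs().maxCoeff(),
                           dJ.colwise().sum().cwiseAbs().maxCoeff());
    if (error < tol) break;
  }
}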
int main(void)
{
    cout << "Eigen v" << EIGEN_WORLD_VERSION << "." << EIGEN_MAJOR_VERSION
         << "." << EIGEN_MINOR_VERSION << endl;

    static const int R = 288;
    static const int N = R*(R+1)/2;
    static const int M = 63;
    static const int HALF_M = M/2;
    static const float nsigma = 2.5f;

    MatrixXf data = MatrixXf::Random(M, N);
    MatrixXf mask = MatrixXf::Zero(M, N);
    MatrixXf result = MatrixXf::Zero(1, N);
    VectorXf stddev = VectorXf::Zero(N);   // renamed from `std` to avoid
                                           // shadowing the std namespace
    VectorXf centroid = VectorXf::Zero(N);
    VectorXf mean = VectorXf::Zero(N);
    VectorXf minval = VectorXf::Zero(N);
    VectorXf maxval = VectorXf::Zero(N);

    cout << "computing..." << flush;
    double t = GetRealTime();

    // computes the exact median
    if (M&1) {
        #pragma omp parallel for
        for (int i = 0; i < N; i++) {
            vector<float> row(data.data()+i*M, data.data()+(i+1)*M);
            nth_element(row.begin(), row.begin()+HALF_M, row.end());
            centroid(i) = row[HALF_M];
        }
    }
    // even M: average the two middle elements;
    // nth_element guarantees x_0,...,x_{n-1} <= x_n
    else {
        #pragma omp parallel for
        for (int i = 0; i < N; i++) {
            vector<float> row(data.data()+i*M, data.data()+(i+1)*M);
            nth_element(row.begin(), row.begin()+HALF_M, row.end());
            centroid(i) = row[HALF_M];
            centroid(i) += *max_element(row.begin(), row.begin()+HALF_M);
            centroid(i) *= 0.5f;
        }
    }

    // compute the mean
    mean = data.colwise().mean();

    // compute std(x) = sqrt( 1/M SUM_i (x(i) - mean(x))^2 )
    stddev = (((data.rowwise() - mean.transpose()).array().square())
                  .colwise().sum() * (1.0f / M))
                 .array()
                 .sqrt();

    // compute n sigmas from centroid
    minval = centroid - stddev * nsigma;
    maxval = centroid + stddev * nsigma;

    // compute clip mask; both bounds must hold, so the two comparisons are
    // combined with && (assigning them separately would overwrite the first)
    for (int i = 0; i < N; i++) {
        mask.col(i) = ((data.col(i).array() > minval(i)) &&
                       (data.col(i).array() < maxval(i)))
                          .select(VectorXf::Ones(M), 0.0f);
    }

    // apply clip mask to data
    data.array() *= mask.array();

    // compute mean ignoring clipped data; this is our final result
    result = data.colwise().sum().array() / mask.colwise().sum().array();

    t = GetRealTime() - t;
    cout << "[done]" << endl << endl;

    size_t bytes = data.size()*sizeof(float);
    cout << "data: " << M << "x" << N << endl;
    cout << "size: " << bytes*1e-6f << " MB" << endl;
    cout << "rate: " << bytes/(1e6f*t) << " MB/s" << endl;
    cout << "time: " << t << " s" << endl;
    return 0;
}
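// Reusable restatement (a sketch, not part of the benchmark): the per-column
// clipping step as a standalone helper. Keeps samples strictly inside
// (lo, hi) and averages only the survivors; returns NaN if every sample was
// clipped (keep.sum() == 0).
float clipped_mean(const VectorXf &col, float lo, float hi)
{
    ArrayXf a = col.array();
    ArrayXf keep = ((a > lo) && (a < hi)).cast<float>();  // 1 = inside bounds
    return (a * keep).sum() / keep.sum();
}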
/*
 * Get centroid
 */
Vector3f getCentroid(const MatrixXf &m)
{
    // Points are stored one per column, so the centroid is the row-wise sum
    // divided by the number of columns. (Taking m by const reference avoids
    // copying the matrix.)
    return m.rowwise().sum() / m.cols();
}
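// Usage sketch (not from the original source): the centroid of the three
// standard basis vectors of R^3, stored as columns, is (1/3, 1/3, 1/3).
void centroid_demo()
{
    MatrixXf pts(3, 3);
    pts.setIdentity();                        // columns e1, e2, e3
    Vector3f c = getCentroid(pts);
    std::cout << c.transpose() << std::endl;  // 0.333 0.333 0.333
}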
void sparsify_soft_relative_to_row_col_max (
    const MatrixXf & dense,
    MatrixSf & sparse,
    float relthresh,
    bool ignore_diagonal)
{
  ASSERT_LT(0, relthresh);
  LOG("sparsifying " << dense.rows() << " x " << dense.cols()
      << " positive matrix to relative threshold " << relthresh);

  VectorXf row_max;
  VectorXf col_max;
  if (ignore_diagonal) {
    // temporarily zero the diagonal so it does not dominate the row/col maxima
    VectorXf diag = dense.diagonal();
    const_cast<MatrixXf &>(dense).diagonal().setZero();
    row_max = dense.rowwise().maxCoeff();
    col_max = dense.colwise().maxCoeff();
    const_cast<MatrixXf &>(dense).diagonal() = diag;
  } else {
    row_max = dense.rowwise().maxCoeff();
    col_max = dense.colwise().maxCoeff();
  }

  const int I = dense.rows();
  const int J = dense.cols();

  sparse.resize(I,J);
  double sum_dense = 0;
  double sum_sparse = 0;
  for (int j = 0; j < J; ++j) {
    for (int i = 0; i < I; ++i) {
      const float dense_ij = dense(i,j);
      sum_dense += dense_ij;

      const float thresh = relthresh * min(row_max(i), col_max(j));
      if (dense_ij > thresh) {
        sparse.insert(i,j) = dense_ij;
        sum_sparse += dense_ij;
      }
    }
  }
  sparse.finalize();

  float density = sparse.nonZeros() / float(I * J);
  float loss = (sum_dense - sum_sparse) / sum_dense;
  LOG("sparsifying to density " << density
      << " loses " << (100 * loss) << "% of mass");
}
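// Usage sketch (not from the original source): assumes MatrixSf is the sparse
// float matrix type used above. A dominant diagonal is added so that
// ignore_diagonal = true visibly matters: without it, the large diagonal
// entries would inflate every row/column threshold.
void sparsify_demo()
{
    MatrixXf dense = MatrixXf::Random(100, 100).cwiseAbs(); // positive entries
    dense.diagonal().array() += 10.f;                       // dominant diagonal
    MatrixSf sparse;
    sparsify_soft_relative_to_row_col_max(dense, sparse, 0.1f, true);
}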