int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);

  // The variable to solve for with its initial value. It will be
  // mutated in place by the solver.
  double x = 0.5;
  const double initial_x = x;

  // Build the problem.
  Problem problem;

  // Set up the only cost function (also known as residual). This uses
  // auto-differentiation to obtain the derivative (Jacobian).
  CostFunction* cost_function =
      new AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor);
  problem.AddResidualBlock(cost_function, NULL, &x);

  // Run the solver!
  Solver::Options options;
  options.minimizer_progress_to_stdout = true;
  Solver::Summary summary;
  Solve(options, &problem, &summary);

  std::cout << summary.BriefReport() << "\n";
  std::cout << "x : " << initial_x << " -> " << x << "\n";
  return 0;
}
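// The snippet above assumes a CostFunctor definition that is not shown in this
// file. A minimal sketch, following the form of the Ceres "hello world"
// tutorial functor (an assumption for illustration, not the project's actual
// code), that makes the example self-contained:
struct CostFunctor {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    // f(x) = 10 - x, so the solver drives x toward 10.
    residual[0] = 10.0 - x[0];
    return true;
  }
};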
// ================================================================================================
// =============================== FUNCTIONS of CLASS BALOptimizer ================================
// ================================================================================================
void BALOptimizer::runBAL()
{
    int num_cams = visibility->rows();
    int num_features = visibility->cols();
    int step_tr = translation_and_intrinsics->rows();
    int step_st = structure->rows();
    double cost;
    quaternion_vector2eigen_vector( *quaternion, q_vector );

    Problem problem;
    ceres::LossFunction* loss_function = new ceres::HuberLoss(1.0);

    Solver::Options options;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    options.minimizer_progress_to_stdout = true;
    options.gradient_tolerance = 1e-16;
    options.function_tolerance = 1e-16;
    options.num_threads = 8;
    options.max_num_iterations = 50;

    // Add one reprojection-error residual per visible (camera, feature) pair.
    for (int cam = 0; cam < num_cams; ++cam)
    {
        for (int ft = 0; ft < num_features; ++ft)
        {
            if( (*visibility)(cam,ft) == true )
            {
                CostFunction* cost_function = new AutoDiffCostFunction<Snavely_RE_KDQTS, 2, 4, 6, 3>(
                        new Snavely_RE_KDQTS( (*coordinates)(cam,ft)(0), (*coordinates)(cam,ft)(1) ) );
                problem.AddResidualBlock(cost_function, loss_function,
                        q_vector[cam].data(),
                        (translation_and_intrinsics->data() + step_tr*cam),
                        (structure->data() + step_st*ft) );
            }
        }
    }

    cost = 0;
    problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
    std::cout << "Initial RMS Reprojection Error is : " << std::sqrt(double(cost/num_features)) << "\n";

    Solver::Summary summary;
    Solve(options, &problem, &summary);
    std::cout << summary.BriefReport() << "\n";

    cost = 0;
    problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
    std::cout << "RMS Reprojection Error is : " << std::sqrt(double(cost/num_features)) << "\n\n";

    update();   // update quaternion; normalize translation to 1
    return;
}
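// The Snavely_RE_KDQTS functor is defined elsewhere in the project. From the
// template arguments above it must produce 2 residuals from a 4-parameter
// quaternion, a 6-parameter translation-plus-intrinsics block, and a 3D point.
// A sketch of such a functor, assuming the standard BAL/Snavely reprojection
// model (translation, focal length, two radial distortion terms) and Ceres'
// [w, x, y, z] quaternion convention from <ceres/rotation.h>; this is an
// illustrative assumption, not the project's actual definition:
struct Snavely_RE_KDQTS_Sketch {
  Snavely_RE_KDQTS_Sketch(double observed_x, double observed_y)
      : observed_x(observed_x), observed_y(observed_y) {}

  template <typename T>
  bool operator()(const T* const quaternion,          // [w, x, y, z]
                  const T* const tr_and_intrinsics,   // [tx, ty, tz, f, k1, k2]
                  const T* const point,               // [X, Y, Z]
                  T* residuals) const {
    // Rotate the point into the camera frame and translate it.
    T p[3];
    ceres::QuaternionRotatePoint(quaternion, point, p);
    p[0] += tr_and_intrinsics[0];
    p[1] += tr_and_intrinsics[1];
    p[2] += tr_and_intrinsics[2];

    // Perspective division (Snavely's model looks down the negative z axis).
    const T xp = -p[0] / p[2];
    const T yp = -p[1] / p[2];

    // Apply radial distortion and focal length.
    const T& f  = tr_and_intrinsics[3];
    const T& k1 = tr_and_intrinsics[4];
    const T& k2 = tr_and_intrinsics[5];
    const T r2 = xp * xp + yp * yp;
    const T distortion = 1.0 + r2 * (k1 + k2 * r2);

    // Residual is the difference between the projected and observed pixel.
    residuals[0] = f * distortion * xp - observed_x;
    residuals[1] = f * distortion * yp - observed_y;
    return true;
  }

  double observed_x, observed_y;
};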
void lidarBoostEngine::build_superresolution(short coeff)
{
    std::cout << "Num of clouds : " << Y.size() << std::endl;
//    std::cout << Y[0] << std::endl;
    beta = coeff;

    std::vector < MatrixXd > optflow = lk_optical_flow( Y[2], Y[4], 10 );

    MatrixXd D( beta*n, beta*m ); //, X( beta*n, beta*m );
//    SparseMatrix<double> W( beta*n, beta*m ), T( beta*n, beta*m );

    D = apply_optical_flow(Y[2], optflow);
    T = check_unreliable_samples(intensityMap[2], 0.0001);
    MatrixXd up_D = nearest_neigh_upsampling(D);

    //// Display and Debug
    cv::Mat M(n, m, CV_32FC1);
//    MatrixXd diff1(n, m);
//    diff1 = MatrixXd::Ones(n, m) - Y[0];
    cv::eigen2cv(Y[2], M);

    cv::Mat M1(n, m, CV_32FC1);
    cv::eigen2cv(Y[4], M1);

//    MatrixXd diff(beta*n, beta*m);
//    diff = MatrixXd::Ones(beta*n, beta*m) - up_D;
    cv::Mat M2(beta*n, beta*m, CV_32FC1);
    cv::eigen2cv(up_D, M2);

    cv::namedWindow("check", cv::WINDOW_AUTOSIZE );
    cv::imshow("check", M);
    cv::namedWindow("check1", cv::WINDOW_AUTOSIZE );
    cv::imshow("check1", M1);
    cv::namedWindow("check2", cv::WINDOW_AUTOSIZE );
    cv::imshow("check2", M2);

    //// Solve example equation with eigen
//    Eigen::VectorXd x(2);
//    x(0) = 10.0;
//    x(1) = 25.0;
//    std::cout << "x: " << x << std::endl;

//    my_functor functor;
//    Eigen::NumericalDiff<my_functor> numDiff(functor);
//    Eigen::LevenbergMarquardt<Eigen::NumericalDiff<my_functor>,double> lm(numDiff);
//    lm.parameters.maxfev = 2000;
//    lm.parameters.xtol = 1.0e-10;
//    std::cout << lm.parameters.maxfev << std::endl;

//    int ret = lm.minimize(x);
//    std::cout << lm.iter << std::endl;
//    std::cout << ret << std::endl;

//    std::cout << "x that minimizes the function: " << x << std::endl;

    ////// Try to solve lidarboost with Eigen
//    my_functor functor;
//    Eigen::NumericalDiff<my_functor> numDiff(functor);
//    Eigen::LevenbergMarquardt<Eigen::NumericalDiff<my_functor>,double> lm(numDiff);
//    lm.parameters.maxfev = 2000;
//    lm.parameters.xtol = 1.0e-10;
//    std::cout << lm.parameters.maxfev << std::endl;

//    VectorXd val(2);
//    for(int i = 0; i < X.rows(); i++)
//    {
//        for(int j = 0; j < X.cols(); j++)
//        {
//            val = X(i, j);
//            int ret = lm.minimize(val);
//        }
//    }
//    std::cout << lm.iter << std::endl;
//    std::cout << ret << std::endl;
//    std::cout << "x that minimizes the function: " << X << std::endl;

    //// Solve example using ceres
    // The variable to solve for with its initial value.
//    double initial_x = 5.0;
//    double x = initial_x;

    MatrixXd X(beta*n, beta*m); // init_X(beta*n, beta*m);
//    X = MatrixXd::Zero(beta*n, beta*m);
    X = up_D;
//    MatrixXd init_X( beta*n, beta*m );
//    init_X = X;

//    int M[2][2], M2[2][2];
//    M[0][0] = 5;
//    M[1][0] = 10;
//    M[0][1] = 20;
//    M[1][1] = 30;
//    M2 = *M;

    // Build the problem.
    Problem problem;

    // Set up the only cost function (also known as residual). This uses
    // auto-differentiation to obtain the derivative (Jacobian).
    double val, w, t, d;

    Solver::Options options;
    options.linear_solver_type = ceres::DENSE_QR;
    options.minimizer_progress_to_stdout = false;
    Solver::Summary summary;

    // Solve a scalar problem per pixel of the upsampled depth map. Note that
    // the residual blocks accumulate in the single Problem declared above, and
    // every block is bound to the same local variable &val.
    for(int i = 0; i < X.rows(); i++)
    {
        for(int j = 0; j < X.cols(); j++)
        {
            val = X(i, j);
            w = W(i, j);
            t = T(i, j);
            d = up_D(i, j);

            std::cout << "i = " << i << "; j = " << j << std::endl;
            std::cout << "w = " << w << "; t = " << t << "; d = " << d << std::endl;

            CostFunction* cost_function =
                new AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor(w, t, d));
            problem.AddResidualBlock(cost_function, NULL, &val);

            // Run the solver
            Solve(options, &problem, &summary);
            X(i, j) = val;
        }
    }

    std::cout << summary.BriefReport() << "\n";
//    std::cout << "x : " << init_X
//              << " -> " << X << "\n";

    cv::Mat M3(beta*n, beta*m, CV_32FC1);
    cv::eigen2cv(X, M3);
    cv::namedWindow("check3", cv::WINDOW_AUTOSIZE );
    cv::imshow("check3", M3);
}
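// The CostFunctor used in the per-pixel loop above takes (w, t, d) and is a
// different functor from the argument-free CostFunctor of the hello-world
// snippet; its definition is not shown in this file. A guess at its shape,
// assuming a LidarBoost-style data term that penalizes the weighted difference
// between the high-resolution estimate x and the upsampled observation d
// (w a flow/weight value, t a reliability-mask value); this is an illustrative
// assumption, not the project's actual functor:
struct PerPixelDataTerm {
  PerPixelDataTerm(double w, double t, double d) : w(w), t(t), d(d) {}

  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    // residual = t * w * (x - d); zero-weight or unreliable samples
    // contribute nothing to the cost.
    residual[0] = t * w * (x[0] - d);
    return true;
  }

  double w, t, d;
};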