/*
 * Driver: locate the minima of bessj0 on [1, 101].
 * For each unit interval a minimum is bracketed with mnbrak and refined with
 * dbrent (Brent minimisation using the derivative dfunc, tolerance TOL).
 * Each refined minimum is recorded only if it differs from all previously
 * recorded ones by more than the relative tolerance EQL, then printed.
 * Returns 0 on completion.
 */
int main(void)
{
	/* amin[] holds distinct minima at indices 1..MAXMIN (amin[0] unused,
	   Numerical Recipes unit-offset convention). The cap guards against
	   overflowing the fixed-size array. */
	enum { MAXMIN = 20 };
	int i, iflag, j, nmin = 0;
	float ax, bx, cx, fa, fb, fc, xmin, dbr, amin[MAXMIN + 1];

	printf("\nMinima of the function bessj0\n");
	printf("%10s %8s %16s %12s %11s\n",
		"min. #", "x", "bessj0(x)", "bessj1(x)", "DBRENT");
	for (i = 1; i <= 100; i++) {
		/* Bracket a minimum starting from the interval [i, i+1]. */
		ax = i;
		bx = i + 1.0;
		mnbrak(&ax, &bx, &cx, &fa, &fb, &fc, func);
		dbr = dbrent(ax, bx, cx, func, dfunc, TOL, &xmin);
		/* Skip xmin if it coincides (within EQL relative tolerance)
		   with a minimum we have already recorded.  When nmin == 0 the
		   scan is empty, so the first minimum is always recorded —
		   same behaviour as the original special-cased branch. */
		iflag = 0;
		for (j = 1; j <= nmin; j++)
			if (fabs(xmin - amin[j]) <= EQL * xmin) iflag = 1;
		if (iflag == 0 && nmin < MAXMIN) {
			amin[++nmin] = xmin;
			printf("%7d %15.6f %12.6f %12.6f %12.6f\n",
				nmin, xmin, func(xmin), dfunc(xmin), dbr);
		}
	}
	return 0;
}
/*
 * Demo driver comparing three ODE integrators over the same system:
 *   1. forward Euler with fixed step h,
 *   2. classical Runge-Kutta with fixed step (rkstep),
 *   3. Runge-Kutta with adaptive step-size control (rkastep).
 * func(x, &p, y) evaluates the analytic solution (used both to seed y at
 * x = 0 and as the reference for the printed errors); dfunc evaluates the
 * derivatives.  Returns 0 on completion.
 *
 * Fixes vs. the original: implicit-int `main()` (invalid in C++/C99),
 * missing `return 0`, uninitialized `hdid`, and the "Runga-Kutta"
 * misspelling in the two section headers.
 */
int main(void)
{
	int n = N, nx = NX, ix;
	float x, h = 0.5, htry, hdid = 0.0, hnext, eps = 0.1;
	float y[N], ye[N], dydx[N], yscl[N];
	Parms p;
	RKState *rk;

	p.freq = 1.0;
	p.phase = 3.0;
	yscl[0] = 1.0;   /* per-component error scaling for the adaptive stepper */
	yscl[1] = 1.0;
	/* NOTE(review): rk is never released; no destroy call is visible in this
	   chunk — confirm whether the RK API provides one. */
	rk = rkcreate(n, &p, (void (*)(float, void *, float[], float[])) dfunc);

	printf("\nEuler's method\n");
	func(0.0, &p, y);   /* seed y with the exact solution at x = 0 */
	for (ix = 0, x = 0.0; ix < nx - 1; ix++, x += h) {
		func(x, &p, ye);   /* analytic reference for the error columns */
		printf("x = %8.3f err[0] = %10.5f err[1] = %10.5f\n",
			x, y[0] - ye[0], y[1] - ye[1]);
		dfunc(x, &p, y, dydx);
		y[0] += h * dydx[0];
		y[1] += h * dydx[1];
	}

	printf("\nRunge-Kutta with constant step size\n");
	func(0.0, &p, y);
	for (ix = 0, x = 0.0; ix < nx - 1; ix++, x += h) {
		func(x, &p, ye);
		printf("x = %8.3f err[0] = %10.5f err[1] = %10.5f\n",
			x, y[0] - ye[0], y[1] - ye[1]);
		dfunc(x, &p, y, dydx);
		rkstep(rk, h, x, y, dydx, y);
	}

	printf("\nRunge-Kutta with adaptive step size\n");
	func(0.0, &p, y);
	/* htry is the step we ask for; rkastep reports the step actually taken
	   (hdid) and its suggestion for the next one (hnext). */
	for (ix = 0, x = 0.0, htry = h; ix < nx - 1; ix++, x += hdid, htry = hnext) {
		func(x, &p, ye);
		printf("x = %8.3f err[0] = %10.5f err[1] = %10.5f\n",
			x, y[0] - ye[0], y[1] - ye[1]);
		dfunc(x, &p, y, dydx);
		rkastep(rk, htry, &hdid, &hnext, eps, yscl, x, y, dydx, y);
		if (hdid == 0.0) break;   /* stepper made no progress: bail out */
	}
	return 0;
}
void structSteepestDescentMinimizer :: v_minimize () { autoNUMvector<double> dp (1, nParameters); autoNUMvector<double> dpp (1, nParameters); double fret = func (object, p); while (iteration < maxNumOfIterations) { dfunc (object, p, dp.peek()); for (long i = 1; i <= nParameters; i++) { dpp[i] = - eta * dp[i] + momentum * dpp[i]; p[i] += dpp[i]; } history[++iteration] = minimum = func (object, p); success = 2.0 * fabs (fret - minimum) < tolerance * (fabs (fret) + fabs (minimum)); if (after) { try { after (this, aclosure); } catch (MelderError) { Melder_casual ("Interrupted after %ld iterations.", iteration); Melder_clearError (); break; } } if (success) { break; } fret = minimum; } }
/*
 * Conjugate-gradient style minimisation with Powell restarts and an inner
 * line search (van der Smagt variant).  State (p, dp, pc, gc, s, g0, grst,
 * srst, restart/flag bookkeeping, ...) lives in the object so the search can
 * be resumed across calls; restart_flag forces a cold start.
 * NOTE(review): heavily order-dependent float updates and raw-pointer swaps
 * below — code left byte-identical, comments only.
 */
void structVDSmagtMinimizer :: v_minimize () {
	int decrease_direction_found = 1;
	// Translated from Dutch: "David, that is dangerous: a local variable with
	// the same name as a member; therefore renamed — but is it correct?"
	// yes, we can iterate in steps, therefore local and global counter
	int l_iteration = 1;
	double rtemp, rtemp2;
	// df is estimate of function reduction obtainable during line search
	// restart = 2 => line search in direction of steepest descent
	// restart = 1 => line search with Powell-restart.
	// flag = 1 => no decrease in function value during previous line search;
	// flag = 2 => line search did not decrease gradient
	// OK; must restart
	if (restart_flag) {
		// Cold start: evaluate f and its gradient at the current point and
		// reset all search bookkeeping.
		minimum = func (object, p);
		dfunc (object, p, dp);
		df = minimum;
		restart = 2;
		one_up = flag = 0;
		gcg0 = gopt_sq = 0.0;
	}
	restart_flag = 1;
	while (++ this -> iteration <= maxNumOfIterations) {
		if (flag & 1) {
			// Previous line search found no decrease; a second consecutive
			// failure means no descent direction exists — give up.
			if (one_up) {
				decrease_direction_found = 0;
				this -> iteration --;
				break;
			} else {
				one_up = 1;
			}
		} else {
			one_up = 0;
		}
		if (flag & 2) {
			restart = 2; /* flag & 1 ??? */
		} else if (fabs ( (double) gcg0) > 0.2 * gopt_sq) {
			// Powell restart criterion: gradients no longer near-orthogonal.
			restart = 1;
		}
		if (restart == 0) {
			// Regular CG update of the search direction s, unless the
			// projected reduction looks unreliable (then force a restart).
			rtemp = rtemp2 = 0.0;
			for (long i = 1; i <= nParameters; i++) {
				rtemp += gc[i] * grst[i];
				rtemp2 += gc[i] * srst[i];
			}
			gamma = rtemp / gamma_in;
			if (fabs (beta * gropt - gamma * rtemp2) > 0.2 * gopt_sq) {
				restart = 1;
			} else {
				for (long i = 1; i <= nParameters; i++) {
					s[i] = beta * s[i] + gamma * srst[i] - gc[i];
				}
			}
		}
		if (restart == 2) {
			// Steepest descent: search straight down the gradient.
			for (long i = 1; i <= nParameters; i++) {
				s[i] = - dp[i];
			}
			restart = 1;
		} else if (restart == 1) {
			// Powell restart: remember the restart direction/gradient so the
			// next regular update can reference them.
			gamma_in = gropt - gr0;
			for (long i = 1; i <= nParameters; i++) {
				srst[i] = s[i];
				s[i] = beta * s[i] - gc[i];
				grst[i] = gc[i] - g0[i];
			}
			restart = 0;
		}
		// Begin line search
		// lineSearch_iteration = #iterations during current line search
		flag = 0;
		lineSearch_iteration = 0;
		rtemp = 0.0;
		for (long i = 1; i <= nParameters; i++) {
			rtemp += dp[i] * s[i];
			g0[i] = dp[i];
		}
		gr0 = gropt = rtemp;   // directional derivative along s at the origin
		if (l_iteration == 1) {
			alphamin = fabs (df / gropt);
		}
		if (gr0 > 0) {
			// s is not a descent direction: flag it and retry with
			// steepest descent.
			flag = 1;
			restart = 2;
			continue;
		}
		f0 = minimum;
		// alpha = length of step along line;
		// dalpha = change in alpha
		// alphamin = position of min along line
		alplim = -1;
		again = -1;
		rtemp = fabs (df / gropt);
		dalpha = alphamin < rtemp ? alphamin : rtemp;
		alphamin = 0;
		do {
			do {
				if (lineSearch_iteration) {
					// Refine the step from a quadratic model of the
					// directional derivative (secant-style update).
					if (! (fch == 0)) {
						gr2s += (temp + temp) / dalpha;
					}
					if (alplim < -0.5) {
						dalpha = 9.0 * alphamin;   // no bracket yet: expand
					} else {
						dalpha = 0.5 * (alplim - alphamin);   // bisect bracket
					}
					grs = gropt + dalpha * gr2s;
					if (gropt * grs < 0) {
						dalpha *= gropt / (gropt - grs);
					}
				}
				alpha = alphamin + dalpha;
				// Trial point pc = p + dalpha * s (p already sits at
				// alphamin along the line after previous accepts).
				for (long i = 1; i <= nParameters; i++) {
					pc[i] = p[i] + dalpha * s[i];
				}
				fc = func (object, pc);
				dfunc (object, pc, gc);
				l_iteration ++;
				lineSearch_iteration++;
				gsq = grc = 0.0;
				for (long i = 1; i <= nParameters; i++) {
					gsq += gc[i] * gc[i];
					grc += gc[i] * s[i];
				}
				fch = fc - minimum;
				gr2s = (grc - gropt) / dalpha;
				temp = (fch + fch) / dalpha - grc - gropt;
				if ( (fc < minimum) || ( (fc == minimum) && (grc / gropt > -1))) {
					// Accept the trial point: swap p<->pc and dp<->gc by
					// pointer so no vectors are copied.
					double *tmp;
					gopt_sq = gsq;
					history [this -> iteration] = minimum = fc;
					tmp = p; p = pc; pc = tmp;
					tmp = dp; dp = gc; gc = tmp;
					if (grc * gropt <= 0) {
						// Derivative changed sign: previous best brackets
						// the line minimum.
						alplim = alphamin;
					}
					alphamin = alpha;
					gropt = grc;
					dalpha = - dalpha;
					success = gsq < tolerance;
					if (after) {
						try {
							after (this, aclosure);
						} catch (MelderError) {
							Melder_casual ("Interrupted after %ld iterations.", this -> iteration);
							Melder_clearError ();
							break;
						}
					}
					if (success) {
						return;
					}
					if (fabs (gropt / gr0) < lineSearchGradient) {
						break;   // gradient along the line reduced enough
					}
				} else {
					alplim = alpha;   // reject: trial point bounds the minimum
				}
			} while (lineSearch_iteration <= lineSearchMaxNumOfIterations);
			// Line search done: restore pc/gc as scratch copies of the best
			// point and compute g_new . g_old for the restart test.
			fc = history [this -> iteration] = minimum;
			rtemp = 0.0;
			for (long i = 1; i <= nParameters; i++) {
				pc[i] = p[i];
				gc[i] = dp[i];
				rtemp += gc[i] * g0[i];
			}
			gcg0 = rtemp;
			if (fabs (gropt - gr0) > tolerance) {
				// Conjugacy coefficient for the next direction update.
				beta = (gopt_sq - gcg0) / (gropt - gr0);
				if (fabs (beta * gropt) < 0.2 * gopt_sq) {
					break;
				}
			}
			again++;
			if (again > 0) {
				flag += 2;   // line search failed to decrease the gradient
			}
		} while (flag < 1);
		if (f0 <= minimum) {
			flag += 1;   // no decrease in function value this round
		}
		df = gr0 * alphamin;   // estimated reduction for the next line search
	}
	if (this -> iteration > maxNumOfIterations) {
		this -> iteration = maxNumOfIterations;
	}
	if (decrease_direction_found) {
		restart_flag = 0;   // warm state is valid for a resumed call
	}
}
//------------------------------------------------------------------------------------------------------ eStatus ApproachTarget(cBlackBoard& bb) { auto me = *bb.mDictPerm.Get<ecs::cEntityWithData>("me"); auto tgt = *bb.mDictTemp.Get<ecs::cEntityWithData>("target"); pgn::ecs::cmp::cLocation * me_loc = me->second.Component<pgn::ecs::cmp::cLocation>(); pgn::ecs::cmp::cLocation * tgt_loc = tgt->second.Component<pgn::ecs::cmp::cLocation>(); const auto& world = mainecs()->TagusToEntities("World")->second->second.Component<ecs::cmp::cWorldData>(); const auto& lvl = world->mLevelMap.find(me_loc->mLevelId)->second; const auto& layout = lvl->second.Component<ecs::cmp::cLevelData>()->mLayout; std::function< float(const glm::ivec2&)> dfunc; // request difi pgn::ecs::cmp::cMapDiFi * tgt_difi = tgt->second.Component<pgn::ecs::cmp::cMapDiFi>(); if (!tgt_difi) { // make the distance function dfunc = [&](const glm::ivec2& off)->float{ auto p = me_loc->mPos + off; if (layout.Obstacles().InRange(p) && (!layout.Obstacles()(p))) return pgn::norm_2(p - tgt_loc->mPos); else return std::numeric_limits<float>::max(); }; } else { dfunc = [&](const glm::ivec2& off)->float{ auto pos = me_loc->mPos + off; auto pos_difi = pos - tgt_difi->mValue.CornerWcs(); if (layout.Obstacles()(pos)) return std::numeric_limits<float>::max(); else { if (tgt_difi->mValue.Data().InRange(pos_difi)) return tgt_difi->mValue.Data()(pos_difi); else return pgn::norm_2(pos - tgt_loc->mPos); } }; } // find the closest point to target auto iters = rl::cShapeCalc< rl::cBoxDistance>::Get(0, 1); auto best_it = iters.first; auto best_d = std::numeric_limits<float>::max(); for (auto it = iters.first; it != iters.second; ++it) { auto d = dfunc(*it); if (d < best_d) { best_d = d; best_it = it; } } if (best_d == std::numeric_limits<float>::max()) return eStatus::Failure; else { // TODO: MoveAdj action! 
use the iterator for the direction and the consumed MovePoints mainecs()->System<ecs::sys::cMoveAdj>()(me, *best_it); return eStatus::Success; } }
//------------------------------------------------------------------------------------------------------ eStatus FleeTarget(cBlackBoard& bb) { // TODO: better fleeing strategy: need longer-term thinking, not just next-square. // Planning ahead for N moves requires increasing intelligence. // easy: I need to do a search for a difi dist of N, starting at my current monster pos. auto me = *bb.mDictPerm.Get<ecs::cEntityWithData>("me"); auto tgt = *bb.mDictTemp.Get<ecs::cEntityWithData>("target"); pgn::ecs::cmp::cLocation * me_loc = me->second.Component<pgn::ecs::cmp::cLocation>(); pgn::ecs::cmp::cLocation * tgt_loc = tgt->second.Component<pgn::ecs::cmp::cLocation>(); const auto& world = mainecs()->TagusToEntities("World")->second->second.Component<ecs::cmp::cWorldData>(); const auto& lvl = world->mLevelMap.find(me_loc->mLevelId)->second; const auto& layout = lvl->second.Component<ecs::cmp::cLevelData>()->mLayout; std::function< float(const glm::ivec2&)> dfunc; // request difi pgn::ecs::cmp::cMapDiFi * tgt_difi = tgt->second.Component<pgn::ecs::cmp::cMapDiFi>(); if (!tgt_difi) { // make the distance function dfunc = [&](const glm::ivec2& off)->float{ auto p = me_loc->mPos + off; if (layout.Obstacles().InRange(p) && (!layout.Obstacles()(p))) return pgn::norm_2(p - tgt_loc->mPos); else return std::numeric_limits<float>::max(); }; } else { dfunc = [&](const glm::ivec2& off)->float{ auto pos = me_loc->mPos + off; auto pos_difi = pos - tgt_difi->mValue.CornerWcs(); if (layout.Obstacles()(pos)) return std::numeric_limits<float>::max(); else { if (tgt_difi->mValue.Data().InRange(pos_difi)) return tgt_difi->mValue.Data()(pos_difi); else return pgn::norm_2(pos - tgt_loc->mPos); } }; } // find the closest point to target auto iters = rl::cShapeCalc< rl::cBoxDistance>::Get(0, 1); auto best_it = iters.first; auto best_d = std::numeric_limits<float>::max(); for (auto it = iters.first; it != iters.second; ++it) { auto d = dfunc(*it); // change sign so we select the furthest distance 
// TODO: this might be crap with movecosts! if (d != std::numeric_limits<float>::max()) d = -d; if (d < best_d) { best_d = d; best_it = it; } } if (best_d == std::numeric_limits<float>::max()) return eStatus::Failure; else { // TODO: MoveAdj action! use the iterator for the direction and the consumed MovePoints mainecs()->System<ecs::sys::cMoveAdj>()(me, *best_it); return eStatus::Success; } }
/*
 * Estimate wavefront Zernike coefficients from a stack of K diversity images.
 * Pipeline: build linear equality constraints relating the phases of the
 * optical paths, split the images into anisoplanatic tiles, take the DFT of
 * each tile, compress the measurements with a random row selector, then
 * iterate a Gauss-Newton-style sparse-recovery loop (linearise via the
 * Metric jacobian, solve by projection, undo constraints, update x0).
 * NOTE(review): currently always returns an empty cv::Mat — the estimated
 * coefficients stay in x0; confirm intended return value.
 */
cv::Mat WavefrontSensor::WavefrontSensing(const std::vector<cv::Mat>& d, const double& meanPowerNoise)
{
	unsigned int numberOfZernikes = 20;   //total number of zernikes to be considered
	int M = numberOfZernikes;   // unknowns per image
	int K = d.size();           // number of diversity images
	cv::Mat Q2;
	//We introduce here the linear relationship between parameter phases of each optical path
	partlyKnownDifferencesInPhaseConstraints(M, K, Q2);
	// Pair Q2 with a zero imaginary plane to get its complex version.
	std::vector<cv::Mat> Q2_v = {Q2, cv::Mat::zeros(Q2.size(), Q2.type())};
	cv::Mat LEC;   //Linear equality constraints
	cv::merge(Q2_v, LEC);   //Build also the complex version of Q2
	//process each patch independently
	cv::Mat dd;
	std::vector<cv::Mat> d_w;          // per-tile compressed data columns
	std::vector<Metric> mtrc_v;        // one Metric per tile
	std::vector<std::pair<cv::Range,cv::Range> > rngs;   // tile row/col ranges
	unsigned int pixelsBetweenTiles = (int)(d.front().cols);
	unsigned int tileSize = 34;
	OpticalSetup tsettings( tileSize );
	std::shared_ptr<Zernike> zrnk = std::make_shared<Zernike>(tsettings.pupilRadiousPixels(), tileSize, numberOfZernikes);
	divideIntoTiles(d.front().size(), pixelsBetweenTiles, tileSize, rngs);
	//Random row selector: Pick incoherent measurements
	cv::Mat eye_nn = cv::Mat::eye(K*tileSize*tileSize, K*tileSize*tileSize, cv::DataType<double>::type);
	unsigned int a = 400;   //number of incoherent measurements kept
	cv::Mat shuffle_eye;
	shuffleRows(eye_nn, shuffle_eye);
	//Split 'a' into rngs.size() pieces
	// A = first 'a' rows of the shuffled identity, as a complex matrix
	// (zero imaginary part): left-multiplying by A picks random rows.
	std::vector<cv::Mat> A_v = {shuffle_eye(cv::Range(0, a), cv::Range::all()), cv::Mat::zeros(a, K*tileSize*tileSize, cv::DataType<double>::type)};
	cv::Mat A;
	cv::merge(A_v, A);
	std::cout << "Number of anisoplanatic patches to annalize at once: " << rngs.size() << std::endl;
	for(auto rng_i : rngs)
	{
		cv::Mat d_col;
		//get ready dataset format: DFT of every diversity image restricted
		//to this tile, flattened into one column vector.
		std::vector<cv::Mat> D;
		std::vector<cv::Mat> d_col_v;
		for(cv::Mat di : d)
		{
			cv::Mat Di;
			cv::dft(di(rng_i.first, rng_i.second), Di, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
			fftShift(Di);
			D.push_back(Di);
			cv::Mat Di_t(Di.t());
			d_col_v.push_back(Di_t.reshape(0, Di_t.total() ));
		}
		cv::vconcat(d_col_v, d_col);
		cv::gemm(A, d_col, 1.0, cv::Mat(), 1.0, d_col);   //Picks rows randomly
		d_w.push_back( d_col );
		mtrc_v.push_back( Metric(D, zrnk, meanPowerNoise) );
	}
	cv::vconcat(d_w, dd);   // stack all tiles into one measurement vector
	//-----------------------BY MEANS OF CONVEX OPTIMIZATION:
	//Objective function and gradient of the objective function
	// NOTE(review): dead branch, kept for reference — never executes.
	if(false)
	{
		for(auto mtrc : mtrc_v)
		{
			std::function<double(cv::Mat)> func = std::bind(&Metric::objective, &mtrc, std::placeholders::_1);
			std::function<cv::Mat(cv::Mat)> dfunc = std::bind(&Metric::gradient, &mtrc, std::placeholders::_1);
			ConvexOptimization minimizationKit;
			cv::Mat x0_conv = cv::Mat::zeros(M*K, 1, cv::DataType<double>::type);   //reset starting point
			//Lambda functions that turn the "minimize function + constraints" problem
			//into an unconstrained minimization in the lower-dimensional space Q2*x.
			auto F_constrained = [] (cv::Mat x, std::function<double(cv::Mat)> func, const cv::Mat& Q2) -> double
			{
				return func(Q2*x);
			};
			auto DF_constrained = [] (cv::Mat x, std::function<cv::Mat(cv::Mat)> dfunc, const cv::Mat& Q2) -> cv::Mat
			{
				return Q2.t() * dfunc(Q2*x);
			};
			std::function<double(cv::Mat)> f_constrained = std::bind(F_constrained, std::placeholders::_1, func, Q2);
			std::function<cv::Mat(cv::Mat)> df_constrained = std::bind(DF_constrained, std::placeholders::_1, dfunc, Q2);
			//Define a new starting point with lower dimensions after reduction with constraints
			cv::Mat p_constrained = Q2.t() * x0_conv;
			ConvexOptimization min;
			min.perform_BFGS(p_constrained, f_constrained, df_constrained);
			x0_conv = Q2 * p_constrained;   //Go back to original dimensional
			std::cout << "mimumum: " << x0_conv.t() << std::endl;
		}
		std::cout << "END OF CONVEX OPTIMIZATION" << std::endl;
	}
	//-----------------------BY MEANS OF SPARSE RECOVERY:
	//Create phase_div bias: only for the case of two diversity images!!
	// cv::Mat phase_div = cv::Mat::zeros(rngs.size()*M*K, 1, cv::DataType<double>::type);
	// phase_div.at<double>(M + 3, 0) = tsettings.k() * 3.141592/(2.0*std::sqrt(3.0));
	cv::Mat x0 = cv::Mat::zeros(rngs.size()*M*K, 1, cv::DataType<double>::type);   //Starting point
	std::vector<double> gamma_v(M*K, 1.0);
	for(unsigned int count=0;count<600;++count)
	{
		// Keep only the real plane of x0 (it becomes complex again below).
		std::vector<cv::Mat> x0_vvv;
		cv::split(x0, x0_vvv);
		x0_vvv.at(0).copyTo(x0);
		cv::Mat_<std::complex<double> > blockMatrix_M;   // block-diagonal jacobian
		std::vector<cv::Mat> De_v;
		for(unsigned int t=0; t < rngs.size(); ++t)
		{
			// Linearise the metric of tile t around its slice of x0.
			cv::Mat jacob_i;
			mtrc_v.at(t).jacobian( x0(cv::Range(t*M*K, (t*M*K) + (M*K)), cv::Range::all()), jacob_i );
			cv::gemm(A, jacob_i, 1.0, cv::Mat(), 1.0, jacob_i);   //Picks rows randomly
			cv::gemm(jacob_i, LEC, 1.0, cv::Mat(), 1.0, jacob_i);   //Apply constraints LECs
			// Grow blockMatrix_M and place jacob_i on its diagonal block t.
			cv::copyMakeBorder(blockMatrix_M, blockMatrix_M, 0, jacob_i.size().height, 0, jacob_i.size().width, cv::BORDER_CONSTANT, cv::Scalar(0.0, 0.0) );
			cv::Rect rect(cv::Point(t*jacob_i.size().width, t*jacob_i.size().height), jacob_i.size() );
			jacob_i.copyTo(blockMatrix_M( rect ));
			// Model prediction for tile t at the current x0.
			cv::Mat De_i;
			mtrc_v.at(t).phi( x0(cv::Range(t*M*K, (t*M*K) + (M*K)), cv::Range::all()), De_i );
			cv::gemm(A, De_i, 1.0, cv::Mat(), 1.0, De_i);   //Picks rows randomly
			De_v.push_back( De_i );
		}
		cv::Mat De;
		cv::vconcat(De_v, De);
		// Promote x0 back to a complex matrix (zero imaginary plane).
		std::vector<cv::Mat> x0_v = {x0, cv::Mat::zeros(x0.size(), x0.type())};
		cv::merge(x0_v, x0);
		//Apply algorithm to get solution
		unsigned int blkLen = rngs.size();   // NOTE(review): unused unless BSBL below is re-enabled
		cv::Mat blockMatrix_M_r;
		reorderColumns(blockMatrix_M, M, blockMatrix_M_r);   //reorder columns so correlated data form a single block
		gamma_v = std::vector<double>(M*K, 1.0);
		//cv::Mat coeffs = perform_BSBL(blockMatrix_M_r, dd - De, NoiseLevel::Noiseless, gamma_v, blkLen);   //Noiseless, LittleNoise
		//cv::Mat coeffs = perform_SBL(blockMatrix_M_r, dd - De, NoiseLevel::Noiseless, gamma_v);   //Noiseless, LittleNoise
		cv::Mat coeffs = perform_projection(blockMatrix_M_r, dd - De);   //Noiseless, LittleNoise
		cv::Mat coeffs_r;
		// Undo the column reordering applied before solving.
		reorderColumns(coeffs.t(), blockMatrix_M.cols/M, coeffs_r);
		cv::Mat coeffs_r_n(coeffs_r.t());
		//Undo constraints: map reduced coefficients back through LEC per tile.
		cv::Mat sol = cv::Mat::zeros(x0.size(), cv::DataType<std::complex<double> >::type);
		for(unsigned int t=0; t < rngs.size(); ++t)
		{
			cv::Mat sol_i;
			cv::gemm(LEC, coeffs_r_n(cv::Range(t*LEC.cols, (t*LEC.cols) + (LEC.cols)), cv::Range::all()), 1.0, cv::Mat(), 1.0, sol_i);
			sol_i.copyTo(sol(cv::Range(t*M*K, (t*M*K) + (M*K)), cv::Range::all()));
		}
		std::cout << "cv::norm(sol): " << cv::norm(sol) << std::endl;
		// Converged when the Newton-style update is negligible.
		if(cv::norm(sol) < 1e-4 )
		{
			std::cout << "Solution found" << std::endl;
			break;
		}
		x0 = x0 - sol;
		std::cout << "Solution number: " << count << std::endl;
		std::cout << "x0: " << x0.t() << std::endl;
	}
	return cv::Mat();   //mtrc.F();
}