#include <cmath>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <string>

#include "pcg_random.hpp"

int main() {
    // Seed with a real random value, if available
    pcg_extras::seed_seq_from<std::random_device> seed_source;

    // Make a random number engine
    pcg32 rng(seed_source);

    // Choose a random mean between 1 and 6
    std::uniform_int_distribution<int> uniform_dist(1, 6);
    int mean = uniform_dist(rng);
    std::cout << "Randomly-chosen mean: " << mean << '\n';

    // Generate a normal distribution around that mean
    std::normal_distribution<> normal_dist(mean, 2);

    // Make a copy of the RNG state to use later
    pcg32 rng_checkpoint = rng;

    std::map<int, int> hist;
    for (int n = 0; n < 10000; ++n) {
        ++hist[std::round(normal_dist(rng))];
    }
    std::cout << "Normal distribution around " << mean << ":\n";
    for (auto p : hist) {
        std::cout << std::fixed << std::setprecision(1) << std::setw(2)
                  << p.first << ' ' << std::string(p.second/30, '*') << '\n';
    }

    std::cout << "Required " << (rng - rng_checkpoint) << " random numbers.\n";
}
#include <cmath>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <string>

int main() {
    // Seed with a real random value, if available
    std::random_device rd;

    // Choose a random mean between 1 and 100
    std::default_random_engine e1(rd());
    std::uniform_int_distribution<int> uniform_dist(1, 100);
    int mean = uniform_dist(e1);
    std::cout << "Randomly-chosen mean: " << mean << '\n';

    // Generate a normal distribution around that mean
    std::mt19937 e2(rd());
    std::normal_distribution<> normal_dist(mean, 2);

    std::map<int, int> hist;
    for (int n = 0; n < 10000; ++n) {
        ++hist[std::round(normal_dist(e2))];
    }
    std::cout << "Normal distribution around " << mean << ":\n";
    for (auto p : hist) {
        std::cout << std::fixed << std::setprecision(1) << std::setw(2)
                  << p.first << ' ' << std::string(p.second/200, '*') << '\n';
    }
}
/**
 * Generate a gaussian random variable with the given mean and standard
 * deviation (defaults: zero mean, unit variance).
 */
inline double gaussian(const double mean = double(0),
                       const double stdev = double(1))
{
  boost::normal_distribution<double> normal_dist(mean, stdev);
  // Serialize access to the shared engine.
  mut.lock();
  const double result = normal_dist(real_rng);
  mut.unlock();
  return result;
} // end of gaussian
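// A self-contained sketch of the same lock-around-the-engine pattern using
// std::lock_guard, so the mutex is released even if drawing the sample throws.
// It uses a local std::mt19937 and std::mutex rather than the real_rng/mut
// members of the snippet above; treat the names as illustrative only.
#include <mutex>
#include <random>

inline double gaussian_locked(const double mean = 0.0, const double stdev = 1.0)
{
  static std::mutex engine_mutex;
  static std::mt19937 engine{std::random_device{}()};
  std::normal_distribution<double> normal_dist(mean, stdev);
  std::lock_guard<std::mutex> guard(engine_mutex);
  return normal_dist(engine);
}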
dynamo::Vector getRandVelVec()
{
  //See http://mathworld.wolfram.com/SpherePointPicking.html
  std::normal_distribution<> normal_dist(0.0, (1.0 / sqrt(double(NDIM))));

  dynamo::Vector tmpVec;
  for (size_t iDim = 0; iDim < NDIM; iDim++)
    tmpVec[iDim] = normal_dist(RNG);

  return tmpVec;
}
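// The MathWorld page referenced above describes uniform sphere point picking:
// a vector of independent N(0,1) components, normalized to unit length, is
// uniformly distributed over directions. The snippet above only scales the
// components (by 1/sqrt(NDIM)) rather than normalizing, so it produces a
// random thermal velocity whose direction is uniform. A minimal standalone
// sketch of the normalization idea, with illustrative names not taken from
// the original codebase:
#include <array>
#include <cmath>
#include <random>

std::array<double, 3> randomUnitVector(std::mt19937& rng)
{
  std::normal_distribution<> normal_dist(0.0, 1.0);
  std::array<double, 3> v;
  double norm2 = 0.0;
  for (auto& x : v) { x = normal_dist(rng); norm2 += x * x; }
  const double norm = std::sqrt(norm2);
  for (auto& x : v) x /= norm;
  return v;
}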
void make_kernel(float *kern, float vsize, float fwhm, int size, int type)
{
  int   kindex, k;
  float c, r, max;

  (void)memset(kern, (int)0, (size_t)((2*size+1)*sizeof(float)));

  for (k = -size/2; k < size/2; ++k) {
    kindex = ((k + size) % size)*2 + 1;

    switch (type) {
    case KERN_GAUSSIAN:
      kern[kindex] = normal_dist(1.0*vsize, fwhm, 0.0, (float)(vsize*k));
      break;
    case KERN_RECT:
      kern[kindex] = rect_dist(1.0*vsize, fwhm, 0.0, (float)(vsize*k));
      break;
    default:
      {
        (void) fprintf(stderr, "Illegal kernel type = %d\n", type);
        (void) fprintf(stderr,
                       "Impossible error in make_kernel(), line %d of %s\n",
                       __LINE__, __FILE__);
        k = size/2;
      }
    }
  }
}
ParticleEventData
DynCompression::runAndersenWallCollision(Particle& part, const Vector& vNorm,
                                         const double& sqrtT, const double d) const
{
  updateParticle(part);

  if (hasOrientationData())
    M_throw() << "Need to implement thermostating of the rotational degrees"
                 " of freedom";

  //This gives a completely new random unit vector with a properly
  //distributed Normal component. See Granular Simulation Book
  ParticleEventData tmpDat(part, *Sim->species[part], WALL);

  double mass = Sim->species[tmpDat.getSpeciesID()]->getMass(part.getID());

  std::normal_distribution<> normal_dist;
  std::uniform_real_distribution<> uniform_dist;

  for (size_t iDim = 0; iDim < NDIM; iDim++)
    part.getVelocity()[iDim] = normal_dist(Sim->ranGenerator) * sqrtT / std::sqrt(mass);

  part.getVelocity()
    //This first line adds a component in the direction of the normal
    += vNorm * (sqrtT * sqrt(-2.0*log(1.0 - uniform_dist(Sim->ranGenerator)) / mass)
                //This removes the original normal component
                - (part.getVelocity() | vNorm)
                //This adds on the velocity of the wall
                + d * growthRate);

  return tmpDat;
}
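// In the snippet above the wall-normal speed is sqrt(T) * sqrt(-2 ln(1 - u) / m)
// with u uniform in [0, 1), i.e. a Rayleigh-distributed speed, which matches
// the flux-weighted distribution commonly used for particles leaving a thermal
// wall, while the tangential components are plain Gaussians. A self-contained
// sketch of just that sampling step (names are illustrative, not from the
// original codebase):
#include <cmath>
#include <random>

double sampleWallNormalSpeed(double temperature, double mass, std::mt19937& rng)
{
  std::uniform_real_distribution<> uniform_dist;  // samples in [0, 1)
  return std::sqrt(-2.0 * temperature * std::log(1.0 - uniform_dist(rng)) / mass);
}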
double generate_normalized(double mean, double sigma, double crop_min, double crop_max)
{
  TRACE_MESSAGE(TRACE_LEVEL_VERBOSE, "+sola::component::random::generate_normalized");

  SERIALIZE_CALL(std::recursive_mutex, __random_lock);

  double result;

  if(!__random_initialized) {
    THROW_RANDOM_EXCEPTION(RANDOM_EXCEPTION_UNINITIALIZED);
  }

  if(sigma < 0.0) {
    THROW_RANDOM_EXCEPTION_MESSAGE(
      RANDOM_EXCEPTION_INVALID_PARAMETER,
      "spread < 0.0"
      );
  }

  if(crop_min > crop_max) {
    THROW_RANDOM_EXCEPTION_MESSAGE(
      RANDOM_EXCEPTION_INVALID_PARAMETER,
      "crop_min > crop_max"
      );
  }

  // Construct the distribution only after its parameters have been validated.
  std::normal_distribution<double> normal_dist(mean, sigma);

  result = normal_dist(__random_generator);

  // Clamp the sample into [crop_min, crop_max].
  if(result < crop_min) {
    result = crop_min;
  } else if(result > crop_max) {
    result = crop_max;
  }

  TRACE_MESSAGE(TRACE_LEVEL_VERBOSE,
                "-sola::component::random::generate_normalized, result. " << result);

  return result;
}
int test()
{
  //tbb::task_scheduler_init init;  // Automatic number of threads
  tbb::task_scheduler_init init(tbb::task_scheduler_init::default_num_threads());  // Explicit number of threads

  // std::random_device rd;
  std::mt19937 e2;  // (rd());
  std::normal_distribution<> normal_dist(1000., 200.);

  thread_local std::vector<float> v;

  int kk = 0;
  int n = 2000;
  int res[n];
  int qq[n];
  for (auto& y : qq)
    y = std::max(0., normal_dist(e2));

  auto theLoop = [&](int i) {
    kk++;
    v.reserve(res[i] = localRA.upper());
    v.resize(qq[i]);
    localRA.update(v.size());
    decltype(v) t;
    swap(v, t);
  };

  tbb::parallel_for(tbb::blocked_range<size_t>(0, n),
                    [&](const tbb::blocked_range<size_t>& r) {
                      for (size_t i = r.begin(); i < r.end(); ++i)
                        theLoop(i);
                    });

  auto mm = std::max_element(res, res + n);
  std::cout << kk << ' ' << localRA.m_curr << ' ' << localRA.mean() << std::endl;
  for (auto& i : localRA.m_buffer)
    std::cout << i << ' ';
  std::cout << std::endl;
  std::cout << std::accumulate(res, res + n, 0) / n << ' '
            << *std::min_element(res + 16, res + n) << ',' << *mm << std::endl;

  return 0;
}
float getRandomDistribution(float mean, float deviation)
{
  static std::normal_distribution<float> normal_dist(mean, deviation);
  return normal_dist(g_randomEngine,
                     std::normal_distribution<float>::param_type(mean, deviation));
}
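// The call above uses the operator()(engine, param_type) overload, which draws
// a sample with the supplied mean/deviation without changing the
// distribution's stored parameters, so the static construction only fixes the
// (unused) defaults. A self-contained sketch of the same pattern, with a local
// engine standing in for g_randomEngine:
#include <random>

float randomNormalSample(float mean, float deviation)
{
  static std::mt19937 engine{std::random_device{}()};
  static std::normal_distribution<float> normal_dist;
  return normal_dist(engine,
                     std::normal_distribution<float>::param_type(mean, deviation));
}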
void generateSamples()  // roughly 87 us per sample
{
  //sparse_cholesky_solver.matrixL();

  // unconditioned sample = Pinv * L^T^-1 * eps
  boost::mt19937 rng;
  boost::normal_distribution<> normal_dist(0.0, 1.0);
  boost::variate_generator<boost::mt19937, boost::normal_distribution<> > gaussian(rng, normal_dist);

  const int num_samples = 24;
  Eigen::MatrixXd normal_samples = Eigen::MatrixXd::Zero(size, num_samples);
  for (int i = 0; i < size; ++i) {
    for (int j = 0; j < num_samples; ++j) {
      normal_samples(i,j) = gaussian();
    }
  }

  Eigen::MatrixXd unconditioned_samples, conditioned_samples;

  // sparse method
  int reps = 1000;
  {
    printf("%d reps of sparse sampling: ", reps);
    boost::progress_timer t;
    // find the unconditioned samples
    for (int i = 0; i < reps; ++i) {
      unconditioned_samples = sparse_cholesky_solver.permutationPinv() *
        sparse_cholesky_solver.matrixU().triangularView<Eigen::Upper>().solve(normal_samples);
    }
  }

  reps = 1000;
  {
    printf("%d reps of dense sample projection: ", reps);
    boost::progress_timer t;
    for (int i = 0; i < reps; ++i) {
      conditioned_samples = unconditioned_samples -
        dense_projector * (dense_C.transpose() * unconditioned_samples);
    }
  }

  // dense method
  {
    // find the unconditioned samples
    // unconditioned_samples =
    //   dense_cholesky_solver.matrixU().triangularView<Eigen::Upper>().solve(normal_samples);
  }

  // find the conditioned samples
  // Eigen::MatrixXd constraint_violations = dense_C.transpose() * unconditioned_samples; // num_constraints x num_samples
  // Eigen::MatrixXd conditioned_samples = unconditioned_samples - dense_projector * constraint_violations;
  // printf("dense projector = %ld x %ld\n", dense_projector.rows(), dense_projector.cols());

  Eigen::MatrixXd constraint_violations = dense_C.transpose() * unconditioned_samples; // num_constraints x num_samples
  // printf("unconditioned samples = %ld x %ld\n", unconditioned_samples.rows(), unconditioned_samples.cols());
  printf("Constraint violations before = %f\n", constraint_violations.norm());

  constraint_violations = dense_C.transpose() * conditioned_samples; // num_constraints x num_samples
  // printf("conditioned samples = %ld x %ld\n", conditioned_samples.rows(), conditioned_samples.cols());
  printf("Constraint violations after = %f\n", constraint_violations.norm());
}
/// Returns a probability < 0.5 for negative beta and a probability > 0.5 for positive beta.
inline double Phi(double beta)
{
  return boost::math::cdf(normal_dist(0., 1.), beta);
}
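// Phi above depends on a normal_dist type defined elsewhere in that codebase.
// A self-contained equivalent using only Boost.Math (the helper name is
// illustrative) might look like:
#include <boost/math/distributions/normal.hpp>

inline double Phi_standalone(double beta)
{
  // CDF of the standard normal: < 0.5 for negative beta, > 0.5 for positive beta.
  static const boost::math::normal_distribution<double> standard_normal(0.0, 1.0);
  return boost::math::cdf(standard_normal, beta);
}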