// Evaluate this EIM theta function at the parameter mu: push the (possibly
// reduced) parameter set into the EIM evaluation object, perform an EIM
// RB-solve with all available basis functions, and return the coefficient
// selected by `index`.
Number RBEIMTheta::evaluate(const RBParameters& mu)
{
  if (mu.n_parameters() > rb_eim_eval.get_n_params())
    {
      // The EIM parameters are a strict subset of the parameters of the
      // associated RB problem, so copy over only the entries the EIM
      // evaluation actually declares.
      RBParameters mu_eim;
      for (RBParameters::const_iterator p = rb_eim_eval.get_parameters().begin();
           p != rb_eim_eval.get_parameters().end(); ++p)
        mu_eim.set_value(p->first, mu.get_value(p->first));

      rb_eim_eval.set_parameters(mu_eim);
    }
  else
    {
      // Same parameter set: pass mu through unchanged.
      rb_eim_eval.set_parameters(mu);
    }

  rb_eim_eval.rb_solve(rb_eim_eval.get_n_basis_functions());

  return rb_eim_eval.RB_solution(index);
}
// Temporarily stretch the mesh in the x-direction by mu("x_scaling"),
// compute and plot the stresses on the scaled geometry, then restore the
// mesh to its original shape by dividing the scaling back out.
void scale_mesh_and_plot(EquationSystems & es, const RBParameters & mu, const std::string & filename)
{
  MeshBase & mesh = es.get_mesh();

  // Stretch every node's x-coordinate.
  {
    MeshBase::node_iterator nd = mesh.nodes_begin();
    const MeshBase::node_iterator nd_end = mesh.nodes_end();
    for ( ; nd != nd_end; ++nd)
      (**nd)(0) *= mu.get_value("x_scaling");
  }

  // Post-process the solution to compute the stresses
  compute_stresses(es);

#ifdef LIBMESH_HAVE_EXODUS_API
  ExodusII_IO (mesh).write_equation_systems (filename, es);
#endif

  // Undo the stretch so the mesh is left unmodified for the caller.
  {
    MeshBase::node_iterator nd = mesh.nodes_begin();
    const MeshBase::node_iterator nd_end = mesh.nodes_end();
    for ( ; nd != nd_end; ++nd)
      (**nd)(0) /= mu.get_value("x_scaling");
  }
}
bool RBParametrized::valid_params(const RBParameters& params) { if(params.n_parameters() != get_n_params()) { libMesh::out << "Error: Number of parameters don't match" << std::endl; libmesh_error(); return false; } else { bool valid = true; RBParameters::const_iterator it = params.begin(); RBParameters::const_iterator it_end = params.end(); for( ; it != it_end; ++it) { std::string param_name = it->first; valid = valid && ( (get_parameter_min(param_name) <= params.get_value(param_name)) && (params.get_value(param_name) <= get_parameter_max(param_name)) ); } if(!valid && verbose_mode) { libMesh::out << "Warning: parameter is outside parameter range" << std::endl; } return valid; } }
// Assemble and return the RBParameters object corresponding to training
// sample `index`, which must lie in this processor's local index range of
// the (possibly distributed) training-parameter vectors.
RBParameters RBConstructionBase<Base>::get_params_from_training_set(unsigned int index)
{
  libmesh_assert(training_parameters_initialized);
  libmesh_assert( (this->get_first_local_training_index() <= index) &&
                  (index < this->get_last_local_training_index()) );

  RBParameters params;

  std::map< std::string, NumericVector<Number>* >::const_iterator p = training_parameters.begin();
  const std::map< std::string, NumericVector<Number>* >::const_iterator p_end = training_parameters.end();
  for ( ; p != p_end; ++p)
    {
      // Each map entry holds one parameter's values for all local samples;
      // libmesh_real strips any (zero) imaginary part in complex builds.
      params.set_value(p->first, libmesh_real( ( *(p->second) )(index) ));
    }

  return params;
}
void RBParametrized::initialize_parameters(const RBParameters& mu_min_in, const RBParameters& mu_max_in, const RBParameters& mu_in) { // Check that the min/max vectors are valid { const std::string err_string = "Error: Invalid mu_min/mu_max in RBParameters constructor."; bool valid_min_max = (mu_min_in.n_parameters() == mu_max_in.n_parameters()); if(!valid_min_max) { libMesh::err << err_string << std::endl; } else { RBParameters::const_iterator it = mu_min_in.begin(); RBParameters::const_iterator it_end = mu_min_in.end(); for( ; it != it_end; ++it) { std::string param_name = it->first; if(mu_min_in.get_value(param_name) > mu_max_in.get_value(param_name)) { libMesh::err << err_string << std::endl; } } } } parameters_min = mu_min_in; parameters_max = mu_max_in; parameters_initialized = true; set_parameters(mu_in); }
// Build a deterministic (tensor-grid) training set for 1 or 2 parameters.
// On return, training_parameters_in maps each parameter name to a
// NumericVector holding that parameter's value for every training sample
// (distributed PARALLEL unless serial_training_set is true). Values are
// spaced linearly or in log10-space according to log_param_scale.
// Aborts for more than two parameters (not implemented) and when, for two
// parameters, n_training_samples_in is not a perfect square.
void RBConstructionBase<Base>::generate_training_parameters_deterministic(const Parallel::Communicator &communicator,
                                                                          std::map<std::string, bool> log_param_scale,
                                                                          std::map< std::string, NumericVector<Number>* >& training_parameters_in,
                                                                          unsigned int n_training_samples_in,
                                                                          const RBParameters& min_parameters,
                                                                          const RBParameters& max_parameters,
                                                                          bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  if (num_params == 0)
    return;

  if (num_params > 2)
    {
      libMesh::out << "ERROR: Deterministic training sample generation "
                   << " not implemented for more than two parameters." << std::endl;
      libmesh_not_implemented();
    }

  // Clear training_parameters_in
  {
    std::map< std::string, NumericVector<Number>* >::iterator it = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();
    for ( ; it != it_end; ++it)
      {
        NumericVector<Number>* training_vector = it->second;
        delete training_vector;
        training_vector = NULL;
      }
    // Fix: also erase the map entries so stale (now-deleted) pointers
    // cannot linger when the parameter names change between calls.
    // This matches generate_training_parameters_random().
    training_parameters_in.clear();
  }

  // Initialize training_parameters_in: one vector per parameter name.
  {
    RBParameters::const_iterator it = min_parameters.begin();
    RBParameters::const_iterator it_end = min_parameters.end();
    for ( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        training_parameters_in[param_name] = NumericVector<Number>::build(communicator).release();

        if (!serial_training_set)
          {
            // Calculate the number of training parameters local to this
            // processor: distribute the remainder over the lowest ranks.
            unsigned int n_local_training_samples;
            unsigned int quotient  = n_training_samples_in/communicator.size();
            unsigned int remainder = n_training_samples_in%communicator.size();
            if (communicator.rank() < remainder)
              n_local_training_samples = (quotient + 1);
            else
              n_local_training_samples = quotient;

            training_parameters_in[param_name]->init(n_training_samples_in, n_local_training_samples, false, PARALLEL);
          }
        else
          {
            training_parameters_in[param_name]->init(n_training_samples_in, false, SERIAL);
          }
      }
  }

  if (num_params == 1)
    {
      NumericVector<Number>* training_vector = training_parameters_in.begin()->second;
      bool use_log_scaling = log_param_scale.begin()->second;
      Real min_param = min_parameters.begin()->second;
      Real max_param = max_parameters.begin()->second;

      numeric_index_type first_index = training_vector->first_local_index();
      for (numeric_index_type i=0; i<training_vector->local_size(); i++)
        {
          numeric_index_type index = first_index+i;
          if (use_log_scaling)
            {
              Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
              Real log_min   = log10(min_param + epsilon);
              Real log_range = log10( (max_param-epsilon) / (min_param+epsilon) );
              Real step_size = log_range / std::max((unsigned int)1,(n_training_samples_in-1));

              if (index<(n_training_samples_in-1))
                {
                  training_vector->set(index, pow(10., log_min + index*step_size ));
                }
              else
                {
                  // due to rounding error, the last parameter can be slightly
                  // bigger than max_parameters, hence snap back to the max
                  training_vector->set(index, max_param);
                }
            }
          else
            {
              // Generate linearly scaled training parameters
              Real step_size = (max_param - min_param) / std::max((unsigned int)1,(n_training_samples_in-1));
              training_vector->set(index, index*step_size + min_param);
            }
        }
    }

  // This is for two parameters: build a full tensor grid.
  if (num_params == 2)
    {
      // First make sure n_training_samples_in is a square number
      unsigned int n_training_parameters_per_var =
        static_cast<unsigned int>( std::sqrt(static_cast<Real>(n_training_samples_in)) );
      if ( (n_training_parameters_per_var*n_training_parameters_per_var) != n_training_samples_in)
        libmesh_error_msg("Error: Number of training parameters = " \
                          << n_training_samples_in \
                          << ".\n" \
                          << "Deterministic training set generation with two parameters requires\n " \
                          << "the number of training parameters to be a perfect square.");

      // make a matrix to store all the parameters, put them in vector form afterwards
      std::vector< std::vector<Real> > training_parameters_matrix(num_params);

      RBParameters::const_iterator it = min_parameters.begin();
      RBParameters::const_iterator it_end = min_parameters.end();
      unsigned int i = 0;
      for ( ; it != it_end; ++it)
        {
          std::string param_name = it->first;
          Real min_param       = it->second;
          bool use_log_scaling = log_param_scale[param_name];
          Real max_param       = max_parameters.get_value(param_name);

          training_parameters_matrix[i].resize(n_training_parameters_per_var);

          for (unsigned int j=0; j<n_training_parameters_per_var; j++)
            {
              // Generate log10 scaled training parameters
              if (use_log_scaling)
                {
                  Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
                  Real log_min   = log10(min_param + epsilon);
                  Real log_range = log10( (max_param-epsilon) / (min_param+epsilon) );
                  Real step_size = log_range / std::max((unsigned int)1,(n_training_parameters_per_var-1));

                  if (j<(n_training_parameters_per_var-1))
                    {
                      training_parameters_matrix[i][j] = pow(10., log_min + j*step_size );
                    }
                  else
                    {
                      // due to rounding error, the last parameter can be slightly
                      // bigger than max_parameters, hence snap back to the max
                      training_parameters_matrix[i][j] = max_param;
                    }
                }
              else
                {
                  // Generate linearly scaled training parameters
                  Real step_size = (max_param - min_param) / std::max((unsigned int)1,(n_training_parameters_per_var-1));
                  training_parameters_matrix[i][j] = j*step_size + min_param;
                }
            }
          i++;
        }

      // now load into training_samples_in: flatten the grid so that
      // index = index1 * n_per_var + index2.
      std::map<std::string, NumericVector<Number>*>::iterator new_it = training_parameters_in.begin();

      NumericVector<Number>* training_vector_0 = new_it->second;
      ++new_it;
      NumericVector<Number>* training_vector_1 = new_it->second;

      for (unsigned int index1=0; index1<n_training_parameters_per_var; index1++)
        {
          for (unsigned int index2=0; index2<n_training_parameters_per_var; index2++)
            {
              unsigned int index = index1*n_training_parameters_per_var + index2;

              // Only set entries owned by this processor.
              if ( (training_vector_0->first_local_index() <= index) &&
                   (index < training_vector_0->last_local_index()) )
                {
                  training_vector_0->set(index, training_parameters_matrix[0][index1]);
                  training_vector_1->set(index, training_parameters_matrix[1][index2]);
                }
            }
        }
    }
}
// Build a random training set: for each parameter, fill a NumericVector
// with n_training_samples_in values drawn uniformly (linearly, or in
// log10-space when log_param_scale[name] is true) between min_parameters
// and max_parameters. A negative training_parameters_random_seed means
// "seed from the system time"; otherwise the given seed is used. With a
// parallel (non-serial) training set the seed is additionally perturbed by
// the processor rank so each processor draws a distinct sample stream.
void RBConstructionBase<Base>::generate_training_parameters_random(const Parallel::Communicator &communicator,
                                                                   std::map<std::string, bool> log_param_scale,
                                                                   std::map< std::string, NumericVector<Number>* >& training_parameters_in,
                                                                   unsigned int n_training_samples_in,
                                                                   const RBParameters& min_parameters,
                                                                   const RBParameters& max_parameters,
                                                                   int training_parameters_random_seed,
                                                                   bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  // Clear training_parameters_in: delete the owned vectors, then drop the
  // (now dangling) map entries.
  {
    std::map< std::string, NumericVector<Number>* >::iterator it = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();
    for ( ; it != it_end; ++it)
      {
        NumericVector<Number>* training_vector = it->second;
        delete training_vector;
        training_vector = NULL;
      }
    training_parameters_in.clear();
  }

  if (num_params == 0)
    return;

  if (training_parameters_random_seed < 0)
    {
      if(!serial_training_set)
        {
          // seed the random number generator with the system time
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( std::time(0)*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the system time
          // only so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( std::time(0) ));
        }
    }
  else
    {
      if(!serial_training_set)
        {
          // seed the random number generator with the provided value
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the provided value
          // so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed ));
        }
    }

  // initialize training_parameters_in: one vector per parameter name,
  // PARALLEL (with the remainder samples on the lowest ranks) unless a
  // serial training set was requested.
  {
    RBParameters::const_iterator it = min_parameters.begin();
    RBParameters::const_iterator it_end = min_parameters.end();
    for( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        training_parameters_in[param_name] = NumericVector<Number>::build(communicator).release();

        if(!serial_training_set)
          {
            // Calculate the number of training parameters local to this processor
            unsigned int n_local_training_samples;
            unsigned int quotient  = n_training_samples_in/communicator.size();
            unsigned int remainder = n_training_samples_in%communicator.size();
            if(communicator.rank() < remainder)
              n_local_training_samples = (quotient + 1);
            else
              n_local_training_samples = quotient;

            training_parameters_in[param_name]->init(n_training_samples_in, n_local_training_samples, false, PARALLEL);
          }
        else
          {
            training_parameters_in[param_name]->init(n_training_samples_in, false, SERIAL);
          }
      }
  }

  // finally, set the values
  {
    std::map< std::string, NumericVector<Number>* >::iterator it = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();
    for( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        NumericVector<Number>* training_vector = it->second;

        numeric_index_type first_index = training_vector->first_local_index();
        for(numeric_index_type i=0; i<training_vector->local_size(); i++)
          {
            numeric_index_type index = first_index + i;
            Real random_number = ((double)std::rand())/RAND_MAX; // in range [0,1]

            // Generate log10 scaled training parameters
            // NOTE(review): unlike the deterministic generator, there is no
            // epsilon guard here — log10 of a zero/negative min would be
            // -inf/NaN; presumably callers enforce min > 0 when
            // log scaling is requested — confirm.
            if(log_param_scale[param_name])
              {
                Real log_min = log10(min_parameters.get_value(param_name));
                Real log_range = log10(max_parameters.get_value(param_name) / min_parameters.get_value(param_name));
                training_vector->set(index, pow(10., log_min + random_number*log_range ) );
              }
            // Generate linearly scaled training parameters
            else
              {
                training_vector->set(index, random_number*(max_parameters.get_value(param_name) - min_parameters.get_value(param_name)) + min_parameters.get_value(param_name));
              }
          }
      }
  }
}