Example #1
void RBConstructionBase<Base>::get_global_max_error_pair(const Parallel::Communicator &communicator,
                                                         std::pair<unsigned int, Real>& error_pair)
{
  // Set error_pair.second to the maximum global value and also
  // find which processor contains the maximum value
  unsigned int proc_ID_index;
  communicator.maxloc(error_pair.second, proc_ID_index);

  // Then broadcast error_pair.first from proc_ID_index
  communicator.broadcast(error_pair.first, proc_ID_index);
}
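A caller-side sketch of the same maxloc/broadcast pattern (the names comm, local_index, and local_error are illustrative assumptions, not part of the original source):

  // Sketch: each rank contributes a (local index, local error) pair; after the
  // two collective calls every rank holds the global maximum error together
  // with the index reported by the rank that owned that maximum.
  std::pair<unsigned int, Real> error_pair(local_index, local_error);

  unsigned int winning_rank;
  comm.maxloc(error_pair.second, winning_rank);     // error_pair.second becomes the global max
  comm.broadcast(error_pair.first, winning_rank);   // error_pair.first is overwritten by the winner's index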
Example #2
Sort<KeyType,IdxType>::Sort(const Parallel::Communicator &comm_in,
                            std::vector<KeyType>& d) :
  ParallelObject(comm_in),
  _n_procs(comm_in.size()),
  _proc_id(comm_in.rank()),
  _bin_is_sorted(false),
  _data(d)
{
  std::sort(_data.begin(), _data.end());

  // Allocate storage for the per-processor bin sizes
  _local_bin_sizes.resize(_n_procs);
}
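For context, a typical driver for this class might look as follows, assuming the sort()/bin() interface of libMesh's Parallel::Sort (a sketch under that assumption, not taken from the original file):

  // Sketch only: keys is a hypothetical vector of this processor's unsorted local keys.
  std::vector<int> keys;  // filled locally before sorting

  Parallel::Sort<int, unsigned int> sorter(comm, keys);
  sorter.sort();                                   // parallel binning + local sort (assumed interface)
  const std::vector<int> & my_bin = sorter.bin();  // this processor's globally sorted bin (assumed interface)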
Example #3
std::string
PETScConfigurable::p_generatePrefix(const parallel::Communicator& comm)
{
  std::string result;
  if (comm.rank() == 0) {
    result = p_generatePrefix();
  }
  boost::mpi::broadcast(comm, result, 0);
  return result;
}
Example #4
// ErrorEstimator functions
void ErrorEstimator::reduce_error (std::vector<ErrorVectorReal> & error_per_cell,
                                   const Parallel::Communicator & comm) const
{
  // This function must be run on all processors at once
  // parallel_object_only();

  // Each processor has now computed the error contributions
  // for its local elements.  We may need to sum the vector to
  // recover the error for each element.

  comm.sum(error_per_cell);
}
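The reduction itself is just an element-wise sum across ranks; a minimal sketch of what comm.sum(error_per_cell) does to the vector (n_elem and the values are illustrative, not from the original):

  // Sketch: every rank holds a vector of the same length; after comm.sum()
  // each entry holds the sum of that entry over all ranks.
  std::vector<ErrorVectorReal> error_per_cell(n_elem, 0.);
  // ... each rank adds its local element contributions ...
  comm.sum(error_per_cell);   // entry i now holds the total error indicator for element i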
Example #5
File: MooseUtils.C  Project: markr622/moose
void
parallelBarrierNotify(const Parallel::Communicator & comm)
{
  processor_id_type slave_processor_id;

  if (comm.rank() == 0)
  {
    // The master process is already through, so report it
    Moose::out << "Jobs complete: 1/" << comm.size() << (1 == comm.size() ? "\n" : "\r") << std::flush;
    for (unsigned int i=2; i<=comm.size(); ++i)
    {
      comm.receive(MPI_ANY_SOURCE, slave_processor_id);
      Moose::out << "Jobs complete: " << i << "/" << comm.size() << (i == comm.size() ? "\n" : "\r") << std::flush;
    }
  }
  else
  {
    slave_processor_id = comm.rank();
    comm.send(0, slave_processor_id);
  }

  comm.barrier();
}
Example #6
void RBConstructionBase<Base>::generate_training_parameters_deterministic(const Parallel::Communicator &communicator,
                                                                          std::map<std::string, bool> log_param_scale,
                                                                          std::map< std::string, NumericVector<Number>* >& training_parameters_in,
                                                                          unsigned int n_training_samples_in,
                                                                          const RBParameters& min_parameters,
                                                                          const RBParameters& max_parameters,
                                                                          bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  if (num_params == 0)
    return;

  if(num_params > 2)
    {
      libMesh::out << "ERROR: Deterministic training sample generation "
                   << " not implemented for more than two parameters." << std::endl;
      libmesh_not_implemented();
    }

  // Clear training_parameters_in
  {
    std::map< std::string, NumericVector<Number>* >::iterator it           = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();

    for ( ; it != it_end; ++it)
      {
        NumericVector<Number>* training_vector = it->second;
        delete training_vector;
        training_vector = NULL;
      }
  }

  // Initialize training_parameters_in
  {
    RBParameters::const_iterator it     = min_parameters.begin();
    RBParameters::const_iterator it_end = min_parameters.end();
    for( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        training_parameters_in[param_name] = NumericVector<Number>::build(communicator).release();

        if(!serial_training_set)
          {
            // Calculate the number of training parameters local to this processor
            unsigned int n_local_training_samples;
            unsigned int quotient  = n_training_samples_in/communicator.size();
            unsigned int remainder = n_training_samples_in%communicator.size();
            if(communicator.rank() < remainder)
              n_local_training_samples = (quotient + 1);
            else
              n_local_training_samples = quotient;

            training_parameters_in[param_name]->init(n_training_samples_in, n_local_training_samples, false, PARALLEL);
          }
        else
          {
            training_parameters_in[param_name]->init(n_training_samples_in, false, SERIAL);
          }
      }
  }

  if(num_params == 1)
    {
      NumericVector<Number>* training_vector = training_parameters_in.begin()->second;
      bool use_log_scaling = log_param_scale.begin()->second;
      Real min_param = min_parameters.begin()->second;
      Real max_param = max_parameters.begin()->second;

      numeric_index_type first_index = training_vector->first_local_index();
      for(numeric_index_type i=0; i<training_vector->local_size(); i++)
        {
          numeric_index_type index = first_index+i;
          if(use_log_scaling)
            {
              Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
              Real log_min   = log10(min_param + epsilon);
              Real log_range = log10( (max_param-epsilon) / (min_param+epsilon) );
              Real step_size = log_range /
                std::max((unsigned int)1,(n_training_samples_in-1));

              if(index<(n_training_samples_in-1))
                {
                  training_vector->set(index, pow(10., log_min + index*step_size ));
                }
              else
                {
                  // due to rounding error, the last parameter can be slightly
                  // bigger than max_parameters, hence snap back to the max
                  training_vector->set(index, max_param);
                }
            }
          else
            {
              // Generate linearly scaled training parameters
              Real step_size = (max_param - min_param) /
                std::max((unsigned int)1,(n_training_samples_in-1));
              training_vector->set(index, index*step_size + min_param);
            }
        }
    }


  // This is for two parameters
  if(num_params == 2)
    {
      // First make sure n_training_samples_in is a square number
      unsigned int n_training_parameters_per_var = static_cast<unsigned int>( std::sqrt(static_cast<Real>(n_training_samples_in)) );
      if( (n_training_parameters_per_var*n_training_parameters_per_var) != n_training_samples_in)
        libmesh_error_msg("Error: Number of training parameters = " \
                          << n_training_samples_in \
                          << ".\n" \
                          << "Deterministic training set generation with two parameters requires\n " \
                          << "the number of training parameters to be a perfect square.");

      // make a matrix to store all the parameters, put them in vector form afterwards
      std::vector< std::vector<Real> > training_parameters_matrix(num_params);

      RBParameters::const_iterator it     = min_parameters.begin();
      RBParameters::const_iterator it_end = min_parameters.end();
      unsigned int i = 0;
      for( ; it != it_end; ++it)
        {
          std::string param_name = it->first;
          Real min_param         = it->second;
          bool use_log_scaling = log_param_scale[param_name];
          Real max_param = max_parameters.get_value(param_name);

          training_parameters_matrix[i].resize(n_training_parameters_per_var);

          for(unsigned int j=0; j<n_training_parameters_per_var; j++)
            {
              // Generate log10 scaled training parameters
              if(use_log_scaling)
                {
                  Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
                  Real log_min   = log10(min_param + epsilon);
                  Real log_range = log10( (max_param-epsilon) / (min_param+epsilon) );
                  Real step_size = log_range /
                    std::max((unsigned int)1,(n_training_parameters_per_var-1));

                  if(j<(n_training_parameters_per_var-1))
                    {
                      training_parameters_matrix[i][j] = pow(10., log_min + j*step_size );
                    }
                  else
                    {
                      // due to rounding error, the last parameter can be slightly
                      // bigger than max_parameters, hence snap back to the max
                      training_parameters_matrix[i][j] = max_param;
                    }
                }
              else
                {
                  // Generate linearly scaled training parameters
                  Real step_size = (max_param - min_param) /
                    std::max((unsigned int)1,(n_training_parameters_per_var-1));
                  training_parameters_matrix[i][j] = j*step_size + min_param;
                }

            }
          i++;
        }

      // now load into training_samples_in:
      std::map<std::string, NumericVector<Number>*>::iterator new_it = training_parameters_in.begin();

      NumericVector<Number>* training_vector_0 = new_it->second;
      ++new_it;
      NumericVector<Number>* training_vector_1 = new_it->second;

      for(unsigned int index1=0; index1<n_training_parameters_per_var; index1++)
        {
          for(unsigned int index2=0; index2<n_training_parameters_per_var; index2++)
            {
              unsigned int index = index1*n_training_parameters_per_var + index2;

              if( (training_vector_0->first_local_index() <= index) &&
                  (index < training_vector_0->last_local_index()) )
                {
                  training_vector_0->set(index, training_parameters_matrix[0][index1]);
                  training_vector_1->set(index, training_parameters_matrix[1][index2]);
                }
            }
        }

      //     libMesh::out << "n_training_samples = " << n_training_samples_in << std::endl;
      //     for(unsigned int index=0; index<n_training_samples_in; index++)
      //     {
      //         libMesh::out << "training parameters for index="<<index<<":"<<std::endl;
      //         for(unsigned int param=0; param<num_params; param++)
      //         {
      //           libMesh::out << " " << (*training_parameters_in[param])(index);
      //         }
      //         libMesh::out << std::endl << std::endl;
      //     }

    }
}
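The quotient/remainder split used above (and again in the random variant below) is the standard way to spread n_training_samples_in as evenly as possible over the ranks; a standalone sketch of that arithmetic:

  // Sketch: the first `remainder` ranks get one extra sample, so the local
  // counts differ by at most one and add up to n_samples exactly.
  unsigned int local_sample_count(unsigned int n_samples,
                                  unsigned int n_ranks,
                                  unsigned int my_rank)
  {
    const unsigned int quotient  = n_samples / n_ranks;
    const unsigned int remainder = n_samples % n_ranks;
    return (my_rank < remainder) ? quotient + 1 : quotient;
  }

For example, 10 samples on 4 ranks gives local counts 3, 3, 2, 2.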
Example #7
void RBConstructionBase<Base>::generate_training_parameters_random(const Parallel::Communicator &communicator,
                                                                   std::map<std::string, bool> log_param_scale,
                                                                   std::map< std::string, NumericVector<Number>* >& training_parameters_in,
                                                                   unsigned int n_training_samples_in,
                                                                   const RBParameters& min_parameters,
                                                                   const RBParameters& max_parameters,
                                                                   int training_parameters_random_seed,
                                                                   bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  // Clear training_parameters_in
  {
    std::map< std::string, NumericVector<Number>* >::iterator it           = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();

    for ( ; it != it_end; ++it)
      {
        NumericVector<Number>* training_vector = it->second;
        delete training_vector;
        training_vector = NULL;
      }
    training_parameters_in.clear();
  }

  if (num_params == 0)
    return;

  if (training_parameters_random_seed < 0)
    {
      if(!serial_training_set)
        {
          // seed the random number generator with the system time
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( std::time(0)*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the system time
          // only so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( std::time(0) ));
        }
    }
  else
    {
      if(!serial_training_set)
        {
          // seed the random number generator with the provided value
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the provided value
          // so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed ));
        }
    }

  // initialize training_parameters_in
  {
    RBParameters::const_iterator it     = min_parameters.begin();
    RBParameters::const_iterator it_end = min_parameters.end();
    for( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        training_parameters_in[param_name] = NumericVector<Number>::build(communicator).release();

        if(!serial_training_set)
          {
            // Calculate the number of training parameters local to this processor
            unsigned int n_local_training_samples;
            unsigned int quotient  = n_training_samples_in/communicator.size();
            unsigned int remainder = n_training_samples_in%communicator.size();
            if(communicator.rank() < remainder)
              n_local_training_samples = (quotient + 1);
            else
              n_local_training_samples = quotient;

            training_parameters_in[param_name]->init(n_training_samples_in, n_local_training_samples, false, PARALLEL);
          }
        else
          {
            training_parameters_in[param_name]->init(n_training_samples_in, false, SERIAL);
          }
      }
  }

  // finally, set the values
  {
    std::map< std::string, NumericVector<Number>* >::iterator it           = training_parameters_in.begin();
    std::map< std::string, NumericVector<Number>* >::const_iterator it_end = training_parameters_in.end();

    for( ; it != it_end; ++it)
      {
        std::string param_name = it->first;
        NumericVector<Number>* training_vector = it->second;

        numeric_index_type first_index = training_vector->first_local_index();
        for(numeric_index_type i=0; i<training_vector->local_size(); i++)
          {
            numeric_index_type index = first_index + i;
            Real random_number = ((double)std::rand())/RAND_MAX; // in range [0,1]

            // Generate log10 scaled training parameters
            if(log_param_scale[param_name])
              {
                Real log_min   = log10(min_parameters.get_value(param_name));
                Real log_range = log10(max_parameters.get_value(param_name) / min_parameters.get_value(param_name));

                training_vector->set(index, pow(10., log_min + random_number*log_range ) );
              }
            // Generate linearly scaled training parameters
            else
              {
                training_vector->set(index, random_number*(max_parameters.get_value(param_name) - min_parameters.get_value(param_name))
                                     + min_parameters.get_value(param_name));
              }
          }
      }
  }
}
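The log10 branch above maps a uniform sample u in [0,1] onto [min, max] in log space; a minimal sketch of that mapping in isolation (the function and variable names are illustrative, not part of the original):

  // Sketch: with u uniform in [0,1],
  //   10^(log10(min) + u * log10(max/min))
  // equals min at u = 0, max at u = 1, and is log-uniform in between.
  Real log_uniform_sample(Real u, Real min_param, Real max_param)
  {
    const Real log_min   = std::log10(min_param);
    const Real log_range = std::log10(max_param / min_param);
    return std::pow(Real(10), log_min + u * log_range);
  }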